Merge pull request #1573 from restic/update-dps

Update dependencies
This commit is contained in:
Alexander Neumann 2018-01-24 19:43:32 +01:00
commit a6d4888d48
3435 changed files with 1318042 additions and 315692 deletions

69
Gopkg.lock generated
View File

@ -10,20 +10,20 @@
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
version = "v0.16.0"
revision = "767c40d6a2e058483c25fa193e963a22da17236d"
version = "v0.18.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = ["storage"]
revision = "7692b0cef22674113fcf71cc17ac3ccc1a7fef48"
version = "v11.2.2-beta"
revision = "eae258195456be76b2ec9ad2ee2ab63cdda365d9"
version = "v12.2.0-beta"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
revision = "c67b24a8e30d876542a85022ebbdecf0e5a935e8"
version = "v9.4.1"
revision = "c2a68353555b68de3ee8455a4fd3e890a0ac6d99"
version = "v9.8.1"
[[projects]]
name = "github.com/cenkalti/backoff"
@ -65,7 +65,7 @@
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
revision = "c65a0412e71e8b9b3bfd22925720d23c0f054237"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
@ -74,10 +74,10 @@
version = "v1.0"
[[projects]]
branch = "master"
name = "github.com/juju/ratelimit"
packages = ["."]
revision = "59fac5042749a5afb9af70e813da1dd5474f0167"
version = "1.0.1"
[[projects]]
branch = "master"
@ -92,22 +92,27 @@
version = "v0.2.1"
[[projects]]
branch = "master"
name = "github.com/minio/go-homedir"
name = "github.com/marstr/guid"
packages = ["."]
revision = "21304a94172ae3a09dee2cd86a12fb6f842138c7"
revision = "8bdf7d1a087ccc975cf37dd6507da50698fd19ca"
[[projects]]
name = "github.com/minio/minio-go"
packages = [".","pkg/credentials","pkg/encrypt","pkg/policy","pkg/s3signer","pkg/s3utils","pkg/set"]
revision = "57a8ae886b49af6eb0d2c27c2d007ed2f71e1da5"
version = "4.0.3"
revision = "14f1d472d115bac5ca4804094aa87484a72ced61"
version = "4.0.6"
[[projects]]
branch = "master"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
revision = "b8bc1bf767474819792c23f32d8286a45736f1c6"
[[projects]]
branch = "master"
name = "github.com/ncw/swift"
packages = ["."]
revision = "c95c6e5c2d1a3d37fc44c8c6dc9e231c7500667d"
revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
[[projects]]
name = "github.com/pkg/errors"
@ -124,8 +129,8 @@
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "98203f5a8333288eb3163b7c667d4260fe1333e9"
version = "1.0.0"
revision = "f6a9258a0f570c3a76681b897b6ded57cb0dfa88"
version = "1.2.0"
[[projects]]
name = "github.com/pkg/xattr"
@ -146,16 +151,16 @@
version = "v1.5"
[[projects]]
name = "github.com/satori/uuid"
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
version = "v1.1.0"
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
name = "github.com/sirupsen/logrus"
packages = ["."]
revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
version = "v1.0.3"
revision = "d682213848ed68c0a260ca37d6dd5ace8423f5ba"
version = "v1.0.4"
[[projects]]
name = "github.com/spf13/cobra"
@ -172,20 +177,20 @@
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","pbkdf2","poly1305","scrypt","ssh","ssh/terminal"]
revision = "94eea52f7b742c7cbe0b03b22f0c4c8631ece122"
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","internal/chacha20","pbkdf2","poly1305","scrypt","ssh","ssh/terminal"]
revision = "3d37316aaa6bd9929127ac9a527abf408178ea7b"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp"]
revision = "a8b9294777976932365dabb6640cf1468d95c70f"
packages = ["context","context/ctxhttp","idna","lex/httplex"]
revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"
revision = "b28fcf2b08a19742b43084fb40ab78ac6c3d8067"
[[projects]]
branch = "master"
@ -197,13 +202,19 @@
branch = "master"
name = "golang.org/x/sys"
packages = ["unix","windows"]
revision = "8b4580aae2a0dd0c231a45d3ccb8434ff533b840"
revision = "af50095a40f9041b3b38960738837185c26e9419"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = ["gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
revision = "3a1d936b7575b82197a1fea0632218dd07b1e65c"
revision = "65b0d8655182691ad23b4fac11e6f7b897d9b634"
[[projects]]
name = "google.golang.org/appengine"
@ -215,7 +226,7 @@
branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "287cf08546ab5e7e37d55a84f7ed3fd1db036de5"
revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4"
[solve-meta]
analyzer-name = "dep"

View File

@ -1,10 +1,10 @@
sudo: false
language: go
go:
- 1.6
- 1.7
- 1.8
- 1.9
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
install:
- go get -v cloud.google.com/go/...
script:

View File

@ -24,8 +24,10 @@ Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Magnus Hiie <magnus.hiie@gmail.com>
Mario Castro <mariocaster@gmail.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>

2
vendor/cloud.google.com/go/LICENSE generated vendored
View File

@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 Google Inc.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

173
vendor/cloud.google.com/go/README.md generated vendored
View File

@ -33,6 +33,70 @@ make backwards-incompatible changes.
## News
_January 18, 2018_
*v0.18.0*
- bigquery:
- Marked stable.
- Schema inference of nullable fields supported.
- Added TimePartitioning to QueryConfig.
- firestore: Data provided to DocumentRef.Set with a Merge option can contain
Delete sentinels.
- logging: Clients can accept parent resources other than projects.
- pubsub:
- pubsub/pstest: A lightweight fake for pubsub. Experimental; feedback welcome.
- Support updating more subscription metadata: AckDeadline,
RetainAckedMessages and RetentionDuration.
- oslogin/apiv1beta: New client for the Cloud OS Login API.
- rpcreplay: A package for recording and replaying gRPC traffic.
- spanner:
- Add a ReadWithOptions that supports a row limit, as well as an index.
- Support query plan and execution statistics.
- Added [OpenCensus](http://opencensus.io) support.
- storage: Clarify checksum validation for gzipped files (it is not validated
when the file is served uncompressed).
_December 11, 2017_
*v0.17.0*
- firestore BREAKING CHANGES:
- Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update.
Change
`docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})`
to
`docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})`
Change
`docref.UpdateStruct(ctx, []string{"Field"}, aStruct)`
to
`docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})`
- Rename MergePaths to Merge; require args to be FieldPaths
- A value stored as an integer can be read into a floating-point field, and vice versa.
- bigtable/cmd/cbt:
- Support deleting a column.
- Add regex option for row read.
- spanner: Mark stable.
- storage:
- Add Reader.ContentEncoding method.
- Fix handling of SignedURL headers.
- bigquery:
- If Uploader.Put is called with no rows, it returns nil without making a
call.
- Schema inference supports the "nullable" option in struct tags for
non-required fields.
- TimePartitioning supports "Field".
_October 30, 2017_
*v0.16.0*
@ -53,7 +117,7 @@ _October 30, 2017_
- Support updating a table's view configuration.
- Fix uploading civil times with nanoseconds.
- storage:
- storage:
- Support PubSub notifications.
- Support Requester Pays buckets.
@ -72,99 +136,31 @@ _October 3, 2017_
- errors: This package has been removed. Use errorreporting.
_September 28, 2017_
*v0.14.0*
- bigquery BREAKING CHANGES:
- Standard SQL is the default for queries and views.
- `Table.Create` takes `TableMetadata` as a second argument, instead of
options.
- `Dataset.Create` takes `DatasetMetadata` as a second argument.
- `DatasetMetadata` field `ID` renamed to `FullID`
- `TableMetadata` field `ID` renamed to `FullID`
- Other bigquery changes:
- The client will append a random suffix to a provided job ID if you set
`AddJobIDSuffix` to true in a job config.
- Listing jobs is supported.
- Better retry logic.
- vision, language, speech: clients are now stable
- monitoring: client is now beta
- profiler:
- Rename InstanceName to Instance, ZoneName to Zone
- Auto-detect service name and version on AppEngine.
_September 8, 2017_
*v0.13.0*
- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these
options to continue using Legacy SQL after the client switches its default
to Standard SQL.
- bigquery: Support for updating dataset labels.
- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other
than the client's. DatasetsInProject is no longer needed and is deprecated.
- bigtable: Fail ListInstances when any zones fail.
- spanner: support decoding of slices of basic types (e.g. []string, []int64,
etc.)
- logging/logadmin: UpdateSink no longer creates a sink if it is missing
(actually a change to the underlying service, not the client)
- profiler: Service and ServiceVersion replace Target in Config.
_August 22, 2017_
*v0.12.0*
- pubsub: Subscription.Receive now uses streaming pull.
- pubsub: add Client.TopicInProject to access topics in a different project
than the client.
- errors: renamed errorreporting. The errors package will be removed shortly.
- datastore: improved retry behavior.
- bigquery: support updates to dataset metadata, with etags.
- bigquery: add etag support to Table.Update (BREAKING: etag argument added).
- bigquery: generate all job IDs on the client.
- storage: support bucket lifecycle configurations.
[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
## Supported APIs
Google API | Status | Package
---------------------------------|--------------|-----------------------------------------------------------
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Container][cloud-container] | alpha | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
[Data Loss Prevention][cloud-dlp]| alpha | [`cloud.google.com/go/dlp/apiv2beta1`][cloud-dlp-ref]
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref]
[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Spanner][cloud-spanner] | beta | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref]
[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
> **Alpha status**: the API is still being actively developed. As a
@ -519,6 +515,9 @@ for more information.
[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
[cloud-oslogin-ref]: https://cloud.google.com/compute/docs/oslogin/rest
[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1
@ -529,13 +528,19 @@ for more information.
[cloud-translation]: https://cloud.google.com/translation
[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation
[cloud-trace]: https://cloud.google.com/trace/
[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace
[cloud-video]: https://cloud.google.com/video-intelligence/
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
[cloud-errors]: https://cloud.google.com/error-reporting/
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting
[cloud-container]: https://cloud.google.com/containers/
[cloud-container-ref]: https://godoc.org/cloud.google.com/go/container/apiv1
[cloud-debugger]: https://cloud.google.com/debugger/
[cloud-debugger-ref]: https://godoc.org/cloud.google.com/go/debugger/apiv2
[cloud-dlp]: https://cloud.google.com/dlp/
[cloud-dlp-ref]: https://godoc.org/cloud.google.com/go/dlp/apiv2beta1
[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials

View File

@ -17,16 +17,15 @@
package main
import (
"context"
"encoding/json"
"flag"
"io/ioutil"
"log"
"time"
"google.golang.org/api/iterator"
"cloud.google.com/go/bigquery"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
func main() {

View File

@ -0,0 +1,67 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package datatransfer
import (
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)
import (
"fmt"
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
// TestDataTransferServiceSmoke is a minimal live-API smoke test: it lists data
// sources for a fixed project/location pair and fails only on an unexpected
// error. It is skipped under -short or when no integration credentials exist.
func TestDataTransferServiceSmoke(t *testing.T) {
if testing.Short() {
t.Skip("skipping smoke test in short mode")
}
ctx := context.Background()
// testutil.TokenSource returns nil when no test credentials are configured.
ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
projectId := testutil.ProjID()
_ = projectId
c, err := NewClient(ctx, option.WithTokenSource(ts))
if err != nil {
t.Fatal(err)
}
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", projectId, "us-central1")
var request = &datatransferpb.ListDataSourcesRequest{
Parent: formattedParent,
}
iter := c.ListDataSources(ctx, request)
// iterator.Done is the normal end-of-list sentinel, not a failure.
if _, err := iter.Next(); err != nil && err != iterator.Done {
t.Error(err)
}
}

View File

@ -0,0 +1,601 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package datatransfer
import (
"math"
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// CallOptions contains the retry settings for each method of Client.
// Each field holds the gax call options (retry/backoff policy) applied to the
// RPC of the same name; see defaultCallOptions for the defaults.
type CallOptions struct {
GetDataSource []gax.CallOption
ListDataSources []gax.CallOption
CreateTransferConfig []gax.CallOption
UpdateTransferConfig []gax.CallOption
DeleteTransferConfig []gax.CallOption
GetTransferConfig []gax.CallOption
ListTransferConfigs []gax.CallOption
ScheduleTransferRuns []gax.CallOption
GetTransferRun []gax.CallOption
DeleteTransferRun []gax.CallOption
ListTransferRuns []gax.CallOption
ListTransferLogs []gax.CallOption
CheckValidCreds []gax.CallOption
}
// defaultClientOptions returns the base dial options for the service:
// the production endpoint and the default OAuth scopes. Caller-supplied
// options are appended after these in NewClient and therefore win.
func defaultClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
// defaultCallOptions builds the default per-RPC retry settings.
// Idempotent RPCs retry on DeadlineExceeded/Unavailable with exponential
// backoff (100ms initial, 60s cap, 1.3x multiplier); non-idempotent RPCs get
// no retry entry in the map and so default to no retries.
func defaultCallOptions() *CallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &CallOptions{
GetDataSource: retry[[2]string{"default", "idempotent"}],
ListDataSources: retry[[2]string{"default", "idempotent"}],
CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
DeleteTransferConfig: retry[[2]string{"default", "idempotent"}],
GetTransferConfig: retry[[2]string{"default", "idempotent"}],
ListTransferConfigs: retry[[2]string{"default", "idempotent"}],
ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}],
GetTransferRun: retry[[2]string{"default", "idempotent"}],
DeleteTransferRun: retry[[2]string{"default", "idempotent"}],
ListTransferRuns: retry[[2]string{"default", "idempotent"}],
ListTransferLogs: retry[[2]string{"default", "idempotent"}],
CheckValidCreds: retry[[2]string{"default", "idempotent"}],
}
}
// Client is a client for interacting with BigQuery Data Transfer API.
// Construct one with NewClient; the zero value is not usable.
type Client struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
client datatransferpb.DataTransferServiceClient
// The call options for this service.
CallOptions *CallOptions
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewClient creates a new data transfer service client.
//
// The Google BigQuery Data Transfer Service API enables BigQuery users to
// configure the transfer of their data from other Google Products into BigQuery.
// This service contains methods that are end user exposed. It backs up the
// frontend.
//
// The caller owns the returned client and should Close it when done.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
// User-supplied opts come after the defaults, so they can override them.
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &Client{
conn: conn,
CallOptions: defaultCallOptions(),
client: datatransferpb.NewDataTransferServiceClient(conn),
}
c.setGoogleClientInfo()
return c, nil
}
// Connection returns the client's connection to the API service.
// The connection is shared by all RPCs issued through this client.
func (c *Client) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
//
// keyval is an optional list of alternating key/value strings appended
// after the standard gl-go/gapic/gax/grpc version entries.
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// GetDataSource retrieves a supported data source and returns its settings,
// which can be used for UI rendering.
func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
// Three-index slice caps capacity so append copies instead of mutating
// the shared default call options.
opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
var resp *datatransferpb.DataSource
// gax.Invoke applies the retry/backoff policy from opts around the RPC.
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ListDataSources lists supported data sources and returns their settings,
// which can be used for UI rendering.
//
// RPCs happen lazily as the returned iterator is advanced; each page fetch
// mutates req's PageToken/PageSize fields.
func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
it := &DataSourceIterator{}
// InternalFetch retrieves one page and returns the next page token.
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
var resp *datatransferpb.ListDataSourcesResponse
req.PageToken = pageToken
// Clamp: the proto field is int32 but pageSize is a Go int.
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.DataSources, resp.NextPageToken, nil
}
// fetch adapts InternalFetch to the iterator.PageInfo contract by
// buffering items on the iterator.
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
return it
}
// CreateTransferConfig creates a new data transfer configuration.
func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
// Full slice expression prevents append from mutating the shared defaults.
opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// UpdateTransferConfig updates a data transfer configuration.
// All fields must be set, even if they are not updated.
func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
// Full slice expression prevents append from mutating the shared defaults.
opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// DeleteTransferConfig deletes a data transfer configuration,
// including any associated transfer runs and logs.
// It returns only an error; there is no response payload.
func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
// Response (an Empty message) is intentionally discarded.
_, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// GetTransferConfig returns information about a data transfer config.
func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
// Full slice expression prevents append from mutating the shared defaults.
opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
var resp *datatransferpb.TransferConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ListTransferConfigs returns information about all data transfers in the project.
//
// Pages are fetched lazily as the returned iterator is advanced; each page
// fetch mutates req's PageToken/PageSize fields.
func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
it := &TransferConfigIterator{}
// InternalFetch retrieves one page and returns the next page token.
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
var resp *datatransferpb.ListTransferConfigsResponse
req.PageToken = pageToken
// Clamp: the proto field is int32 but pageSize is a Go int.
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferConfigs, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
return it
}
// ScheduleTransferRuns creates transfer runs for a time range [range_start_time, range_end_time].
// For each date - or whatever granularity the data source supports - in the
// range, one transfer run is created.
// Note that runs are created per UTC time in the time range.
func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
// Full slice expression prevents append from mutating the shared defaults.
opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
var resp *datatransferpb.ScheduleTransferRunsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// GetTransferRun returns information about the particular transfer run.
func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
// Full slice expression prevents append from mutating the shared defaults.
opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...)
var resp *datatransferpb.TransferRun
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// DeleteTransferRun deletes the specified transfer run.
// It returns only an error; there is no response payload.
func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
// Response (an Empty message) is intentionally discarded.
_, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// ListTransferRuns returns information about running and completed jobs.
//
// Pages are fetched lazily as the returned iterator is advanced; each page
// fetch mutates req's PageToken/PageSize fields.
func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
it := &TransferRunIterator{}
// InternalFetch retrieves one page and returns the next page token.
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
var resp *datatransferpb.ListTransferRunsResponse
req.PageToken = pageToken
// Clamp: the proto field is int32 but pageSize is a Go int.
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferRuns, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
return it
}
// ListTransferLogs returns user facing log messages for the data transfer run.
//
// Pages are fetched lazily as the returned iterator is advanced; each page
// fetch mutates req's PageToken/PageSize fields.
func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
it := &TransferMessageIterator{}
// InternalFetch retrieves one page and returns the next page token.
it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
var resp *datatransferpb.ListTransferLogsResponse
req.PageToken = pageToken
// Clamp: the proto field is int32 but pageSize is a Go int.
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.TransferMessages, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
return it
}
// CheckValidCreds returns true if valid credentials exist for the given data source and
// requesting user.
// Some data sources don't support service accounts, so we need to talk to
// them on behalf of the end user. This API just checks whether we have an
// OAuth token for the particular user, which is a prerequisite before the
// user can create a transfer config.
func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
	// Attach the x-goog-* request headers to the outgoing gRPC context.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Copy the default options with pinned capacity so the caller's opts
	// cannot overwrite the shared CallOptions backing array.
	defaults := c.CallOptions.CheckValidCreds
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *datatransferpb.CheckValidCredsResponse
	invokeErr := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if invokeErr != nil {
		return nil, invokeErr
	}
	return resp, nil
}
// DataSourceIterator manages a stream of *datatransferpb.DataSource.
type DataSourceIterator struct {
	items    []*datatransferpb.DataSource
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DataSourceIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) {
	// nextFunc refills the buffer when necessary, or reports Done.
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	head := it.items[0]
	it.items = it.items[1:]
	return head, nil
}

// bufLen reports how many fetched results are still buffered.
func (it *DataSourceIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered results to the iterator machinery and clears the buffer.
func (it *DataSourceIterator) takeBuf() interface{} {
	buffered := it.items
	it.items = nil
	return buffered
}
// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig.
type TransferConfigIterator struct {
	items    []*datatransferpb.TransferConfig
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) {
	// nextFunc refills the buffer when necessary, or reports Done.
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	head := it.items[0]
	it.items = it.items[1:]
	return head, nil
}

// bufLen reports how many fetched results are still buffered.
func (it *TransferConfigIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered results to the iterator machinery and clears the buffer.
func (it *TransferConfigIterator) takeBuf() interface{} {
	buffered := it.items
	it.items = nil
	return buffered
}
// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage.
type TransferMessageIterator struct {
	items    []*datatransferpb.TransferMessage
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) {
	// nextFunc refills the buffer when necessary, or reports Done.
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	head := it.items[0]
	it.items = it.items[1:]
	return head, nil
}

// bufLen reports how many fetched results are still buffered.
func (it *TransferMessageIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered results to the iterator machinery and clears the buffer.
func (it *TransferMessageIterator) takeBuf() interface{} {
	buffered := it.items
	it.items = nil
	return buffered
}
// TransferRunIterator manages a stream of *datatransferpb.TransferRun.
type TransferRunIterator struct {
	items    []*datatransferpb.TransferRun
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferRunIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) {
	// nextFunc refills the buffer when necessary, or reports Done.
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	head := it.items[0]
	it.items = it.items[1:]
	return head, nil
}

// bufLen reports how many fetched results are still buffered.
func (it *TransferRunIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered results to the iterator machinery and clears the buffer.
func (it *TransferRunIterator) takeBuf() interface{} {
	buffered := it.items
	it.items = nil
	return buffered
}

View File

@ -0,0 +1,288 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package datatransfer_test
import (
"cloud.google.com/go/bigquery/datatransfer/apiv1"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)
// ExampleNewClient demonstrates constructing a BigQuery Data Transfer client
// with default credentials.
func ExampleNewClient() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleClient_GetDataSource shows a minimal unary GetDataSource call.
func ExampleClient_GetDataSource() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.GetDataSourceRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetDataSource(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_ListDataSources shows draining a paginated list; iterator.Done
// signals the end of the result stream.
func ExampleClient_ListDataSources() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListDataSourcesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListDataSources(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

// ExampleClient_CreateTransferConfig shows creating a transfer configuration.
func ExampleClient_CreateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.CreateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_UpdateTransferConfig shows updating an existing transfer configuration.
func ExampleClient_UpdateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.UpdateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_DeleteTransferConfig shows deleting a transfer configuration;
// the RPC returns only an error.
func ExampleClient_DeleteTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.DeleteTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

// ExampleClient_GetTransferConfig shows fetching a single transfer configuration.
func ExampleClient_GetTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.GetTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_ListTransferConfigs shows draining the paginated config list.
func ExampleClient_ListTransferConfigs() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListTransferConfigsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferConfigs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_ScheduleTransferRuns shows scheduling transfer runs for a config.
func ExampleClient_ScheduleTransferRuns() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ScheduleTransferRunsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ScheduleTransferRuns(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_GetTransferRun shows fetching a single transfer run.
func ExampleClient_GetTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.GetTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_DeleteTransferRun shows deleting a transfer run; the RPC
// returns only an error.
func ExampleClient_DeleteTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.DeleteTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

// ExampleClient_ListTransferRuns shows draining the paginated run list;
// iterator.Done signals the end of the result stream.
func ExampleClient_ListTransferRuns() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListTransferRunsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferRuns(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

// ExampleClient_ListTransferLogs shows draining the paginated log-message list.
func ExampleClient_ListTransferLogs() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListTransferLogsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferLogs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

// ExampleClient_CheckValidCreds shows checking whether OAuth credentials exist
// for a data source and the requesting user.
func ExampleClient_CheckValidCreds() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.CheckValidCredsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CheckValidCreds(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

View File

@ -0,0 +1,49 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package datatransfer is an auto-generated package for the
// BigQuery Data Transfer API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Transfers data from partner SaaS applications to Google BigQuery on a
// scheduled, managed basis.
package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
// insertMetadata merges mds into the outgoing gRPC metadata carried by ctx,
// appending values for keys that already exist, and returns the new context.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	// Copy so we never mutate metadata owned by the incoming context.
	merged, _ := metadata.FromOutgoingContext(ctx)
	merged = merged.Copy()
	for _, md := range mds {
		for key, vals := range md {
			merged[key] = append(merged[key], vals...)
		}
	}
	return metadata.NewOutgoingContext(ctx, merged)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	scopes := []string{
		"https://www.googleapis.com/auth/bigquery",
		"https://www.googleapis.com/auth/cloud-platform",
		"https://www.googleapis.com/auth/cloud-platform.read-only",
	}
	return scopes
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,135 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datatransfer
// ProjectPath returns the path for the project resource.
//
// Deprecated: Use
//	fmt.Sprintf("projects/%s", project)
// instead.
func ProjectPath(project string) string {
	return "projects/" + project
}

// LocationPath returns the path for the location resource.
//
// Deprecated: Use
//	fmt.Sprintf("projects/%s/locations/%s", project, location)
// instead.
func LocationPath(project, location string) string {
	return "projects/" + project + "/locations/" + location
}

// LocationDataSourcePath returns the path for the location data source resource.
//
// Deprecated: Use
//	fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", project, location, dataSource)
// instead.
func LocationDataSourcePath(project, location, dataSource string) string {
	return "projects/" + project + "/locations/" + location + "/dataSources/" + dataSource
}

// LocationTransferConfigPath returns the path for the location transfer config resource.
//
// Deprecated: Use
//	fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", project, location, transferConfig)
// instead.
func LocationTransferConfigPath(project, location, transferConfig string) string {
	return "projects/" + project + "/locations/" + location + "/transferConfigs/" + transferConfig
}

// LocationRunPath returns the path for the location run resource.
//
// Deprecated: Use
//	fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", project, location, transferConfig, run)
// instead.
func LocationRunPath(project, location, transferConfig, run string) string {
	return "projects/" + project + "/locations/" + location + "/transferConfigs/" + transferConfig + "/runs/" + run
}

// DataSourcePath returns the path for the data source resource.
//
// Deprecated: Use
//	fmt.Sprintf("projects/%s/dataSources/%s", project, dataSource)
// instead.
func DataSourcePath(project, dataSource string) string {
	return "projects/" + project + "/dataSources/" + dataSource
}

// TransferConfigPath returns the path for the transfer config resource.
//
// Deprecated: Use
//	fmt.Sprintf("projects/%s/transferConfigs/%s", project, transferConfig)
// instead.
func TransferConfigPath(project, transferConfig string) string {
	return "projects/" + project + "/transferConfigs/" + transferConfig
}

// RunPath returns the path for the run resource.
//
// Deprecated: Use
//	fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", project, transferConfig, run)
// instead.
func RunPath(project, transferConfig, run string) string {
	return "projects/" + project + "/transferConfigs/" + transferConfig + "/runs/" + run
}

View File

@ -150,8 +150,9 @@ There are two ways to construct schemas with this package.
You can build a schema by hand, like so:
schema1 := bigquery.Schema{
&bigquery.FieldSchema{Name: "Name", Required: true, Type: bigquery.StringFieldType},
&bigquery.FieldSchema{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
{Name: "Name", Required: true, Type: bigquery.StringFieldType},
{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
{Name: "Optional", Required: false, Type: bigquery.IntegerFieldType},
}
Or you can infer the schema from a struct:
@ -159,6 +160,7 @@ Or you can infer the schema from a struct:
type student struct {
Name string
Grades []int
Optional bigquery.NullInt64
}
schema2, err := bigquery.InferSchema(student{})
if err != nil {
@ -166,19 +168,24 @@ Or you can infer the schema from a struct:
}
// schema1 and schema2 are identical.
Struct inference supports tags like those of the encoding/json package,
so you can change names or ignore fields:
Struct inference supports tags like those of the encoding/json package, so you can
change names, ignore fields, or mark a field as nullable (non-required). Fields
declared as one of the Null types (NullInt64, NullFloat64, NullString, NullBool,
NullTimestamp, NullDate, NullTime and NullDateTime) are automatically inferred as
nullable, so the "nullable" tag is only needed for []byte and pointer-to-struct
fields.
type student2 struct {
Name string `bigquery:"full_name"`
Grades []int
Secret string `bigquery:"-"`
Name string `bigquery:"full_name"`
Grades []int
Secret string `bigquery:"-"`
    Optional []byte `bigquery:",nullable"`
}
schema3, err := bigquery.InferSchema(student2{})
if err != nil {
// TODO: Handle error.
}
// schema3 has fields "full_name" and "Grade".
// schema3 has required fields "full_name" and "Grade", and nullable BYTES field "Optional".
Having constructed a schema, you can create a table with it like so:

View File

@ -395,10 +395,12 @@ func ExampleInferSchema() {
func ExampleInferSchema_tags() {
type Item struct {
Name string
Size float64
Count int `bigquery:"number"`
Secret []byte `bigquery:"-"`
Name string
Size float64
Count int `bigquery:"number"`
Secret []byte `bigquery:"-"`
Optional bigquery.NullBool
OptBytes []byte `bigquery:",nullable"`
}
schema, err := bigquery.InferSchema(Item{})
if err != nil {
@ -406,12 +408,14 @@ func ExampleInferSchema_tags() {
// TODO: Handle error.
}
for _, fs := range schema {
fmt.Println(fs.Name, fs.Type)
fmt.Println(fs.Name, fs.Type, fs.Required)
}
// Output:
// Name STRING
// Size FLOAT
// number INTEGER
// Name STRING true
// Size FLOAT true
// number INTEGER true
// Optional BOOLEAN false
// OptBytes BYTES false
}
func ExampleTable_Create() {
@ -755,3 +759,27 @@ func ExampleUploader_Put_struct() {
// TODO: Handle error.
}
}
func ExampleUploader_Put_valuesSaver() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
u := client.Dataset("my_dataset").Table("my_table").Uploader()
var vss []*bigquery.ValuesSaver
for i, name := range []string{"n1", "n2", "n3"} {
// Assume schema holds the table's schema.
vss = append(vss, &bigquery.ValuesSaver{
Schema: schema,
InsertID: name,
Row: []bigquery.Value{name, int64(i)},
})
}
if err := u.Put(ctx, vss); err != nil {
// TODO: Handle error.
}
}

View File

@ -53,8 +53,10 @@ var (
}},
}
testTableExpiration time.Time
datasetIDs = testutil.NewUIDSpaceSep("dataset", '_')
tableIDs = testutil.NewUIDSpaceSep("table", '_')
// BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
// with underscores.
datasetIDs = testutil.NewUIDSpaceSep("dataset", '_')
tableIDs = testutil.NewUIDSpaceSep("table", '_')
)
func TestMain(m *testing.M) {
@ -94,8 +96,6 @@ func initIntegrationTest() func() {
if err != nil {
log.Fatalf("storage.NewClient: %v", err)
}
// BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
// with underscores.
dataset = client.Dataset(datasetIDs.New())
if err := dataset.Create(ctx, nil); err != nil {
log.Fatalf("creating dataset %s: %v", dataset.DatasetID, err)
@ -197,16 +197,28 @@ func TestIntegration_TableMetadata(t *testing.T) {
// Create tables that have time partitioning
partitionCases := []struct {
timePartitioning TimePartitioning
expectedExpiration time.Duration
timePartitioning TimePartitioning
wantExpiration time.Duration
wantField string
}{
{TimePartitioning{}, time.Duration(0)},
{TimePartitioning{time.Second}, time.Second},
{TimePartitioning{}, time.Duration(0), ""},
{TimePartitioning{Expiration: time.Second}, time.Second, ""},
{
TimePartitioning{
Expiration: time.Second,
Field: "date",
}, time.Second, "date"},
}
schema2 := Schema{
{Name: "name", Type: StringFieldType},
{Name: "date", Type: DateFieldType},
}
for i, c := range partitionCases {
table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i))
err = table.Create(context.Background(), &TableMetadata{
Schema: schema,
Schema: schema2,
TimePartitioning: &c.timePartitioning,
ExpirationTime: time.Now().Add(5 * time.Minute),
})
@ -220,7 +232,10 @@ func TestIntegration_TableMetadata(t *testing.T) {
}
got := md.TimePartitioning
want := &TimePartitioning{c.expectedExpiration}
want := &TimePartitioning{
Expiration: c.wantExpiration,
Field: c.wantField,
}
if !testutil.Equal(got, want) {
t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
}
@ -249,7 +264,7 @@ func TestIntegration_DatasetCreate(t *testing.T) {
t.Errorf("location: got %q, want %q", got, want)
}
if err := ds.Delete(ctx); err != nil {
t.Fatalf("deleting dataset %s: %v", ds, err)
t.Fatalf("deleting dataset %v: %v", ds, err)
}
}
@ -585,7 +600,7 @@ func TestIntegration_UploadAndRead(t *testing.T) {
}
// Test reading directly into a []Value.
valueLists, err := readAll(table.Read(ctx))
valueLists, schema, _, err := readAll(table.Read(ctx))
if err != nil {
t.Fatal(err)
}
@ -595,6 +610,9 @@ func TestIntegration_UploadAndRead(t *testing.T) {
if err := it.Next(&got); err != nil {
t.Fatal(err)
}
if !testutil.Equal(it.Schema, schema) {
t.Fatalf("got schema %v, want %v", it.Schema, schema)
}
want := []Value(vl)
if !testutil.Equal(got, want) {
t.Errorf("%d: got %v, want %v", i, got, want)
@ -656,6 +674,10 @@ type TestStruct struct {
RecordArray []SubTestStruct
}
// Round times to the microsecond for comparison purposes.
var roundToMicros = cmp.Transformer("RoundToMicros",
func(t time.Time) time.Time { return t.Round(time.Microsecond) })
func TestIntegration_UploadAndReadStructs(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
@ -755,15 +777,11 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
}
sort.Sort(byName(got))
// Compare times to the microsecond.
timeEq := func(x, y time.Time) bool {
return x.Round(time.Microsecond).Equal(y.Round(time.Microsecond))
}
// BigQuery does not elide nils. It reports an error for nil fields.
for i, g := range got {
if i >= len(want) {
t.Errorf("%d: got %v, past end of want", i, pretty.Value(g))
} else if diff := testutil.Diff(g, want[i], cmp.Comparer(timeEq)); diff != "" {
} else if diff := testutil.Diff(g, want[i], roundToMicros); diff != "" {
t.Errorf("%d: got=-, want=+:\n%s", i, diff)
}
}
@ -775,6 +793,69 @@ func (b byName) Len() int { return len(b) }
func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }
func TestIntegration_UploadAndReadNullable(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctm := civil.Time{15, 4, 5, 6000}
cdt := civil.DateTime{testDate, ctm}
testUploadAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema)))
testUploadAndReadNullable(t, testStructNullable{
String: NullString{"x", true},
Bytes: []byte{1, 2, 3},
Integer: NullInt64{1, true},
Float: NullFloat64{2.3, true},
Boolean: NullBool{true, true},
Timestamp: NullTimestamp{testTimestamp, true},
Date: NullDate{testDate, true},
Time: NullTime{ctm, true},
DateTime: NullDateTime{cdt, true},
Record: &subNullable{X: NullInt64{4, true}},
},
[]Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, ctm, cdt, []Value{int64(4)}})
}
func testUploadAndReadNullable(t *testing.T, ts testStructNullable, wantRow []Value) {
ctx := context.Background()
table := newTable(t, testStructNullableSchema)
defer table.Delete(ctx)
// Populate the table.
upl := table.Uploader()
if err := upl.Put(ctx, []*StructSaver{{Schema: testStructNullableSchema, Struct: ts}}); err != nil {
t.Fatal(putError(err))
}
// Wait until the data has been uploaded. This can take a few seconds, according
// to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
if err := waitForRow(ctx, table); err != nil {
t.Fatal(err)
}
// Read into a []Value.
iter := table.Read(ctx)
gotRows, _, _, err := readAll(iter)
if err != nil {
t.Fatal(err)
}
if len(gotRows) != 1 {
t.Fatalf("got %d rows, want 1", len(gotRows))
}
if diff := testutil.Diff(gotRows[0], wantRow, roundToMicros); diff != "" {
t.Error(diff)
}
// Read into a struct.
want := ts
var sn testStructNullable
it := table.Read(ctx)
if err := it.Next(&sn); err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(sn, want, roundToMicros); diff != "" {
t.Error(diff)
}
}
func TestIntegration_TableUpdate(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
@ -940,7 +1021,7 @@ func TestIntegration_Load(t *testing.T) {
if err := wait(ctx, job); err != nil {
t.Fatal(err)
}
checkRead(t, "reader load", table.Read(ctx), wantRows)
checkReadAndTotalRows(t, "reader load", table.Read(ctx), wantRows)
}
@ -1270,7 +1351,7 @@ func TestIntegration_ExtractExternal(t *testing.T) {
if err != nil {
t.Fatal(err)
}
checkRead(t, "external query", iter, wantRows)
checkReadAndTotalRows(t, "external query", iter, wantRows)
// Make a table pointing to the file, and query it.
// BigQuery does not allow a Table.Read on an external table.
@ -1288,7 +1369,7 @@ func TestIntegration_ExtractExternal(t *testing.T) {
if err != nil {
t.Fatal(err)
}
checkRead(t, "external table", iter, wantRows)
checkReadAndTotalRows(t, "external table", iter, wantRows)
// While we're here, check that the table metadata is correct.
md, err := table.Metadata(ctx)
@ -1452,19 +1533,28 @@ func newTable(t *testing.T, s Schema) *Table {
}
func checkRead(t *testing.T, msg string, it *RowIterator, want [][]Value) {
if msg2, ok := compareRead(it, want); !ok {
if msg2, ok := compareRead(it, want, false); !ok {
t.Errorf("%s: %s", msg, msg2)
}
}
func compareRead(it *RowIterator, want [][]Value) (msg string, ok bool) {
got, err := readAll(it)
func checkReadAndTotalRows(t *testing.T, msg string, it *RowIterator, want [][]Value) {
if msg2, ok := compareRead(it, want, true); !ok {
t.Errorf("%s: %s", msg, msg2)
}
}
func compareRead(it *RowIterator, want [][]Value, compareTotalRows bool) (msg string, ok bool) {
got, _, totalRows, err := readAll(it)
if err != nil {
return err.Error(), false
}
if len(got) != len(want) {
return fmt.Sprintf("got %d rows, want %d", len(got), len(want)), false
}
if compareTotalRows && len(got) != int(totalRows) {
return fmt.Sprintf("got %d rows, but totalRows = %d", len(got), totalRows), false
}
sort.Sort(byCol0(got))
for i, r := range got {
gotRow := []Value(r)
@ -1476,18 +1566,24 @@ func compareRead(it *RowIterator, want [][]Value) (msg string, ok bool) {
return "", true
}
func readAll(it *RowIterator) ([][]Value, error) {
var rows [][]Value
func readAll(it *RowIterator) ([][]Value, Schema, uint64, error) {
var (
rows [][]Value
schema Schema
totalRows uint64
)
for {
var vals []Value
err := it.Next(&vals)
if err == iterator.Done {
return rows, nil
return rows, schema, totalRows, nil
}
if err != nil {
return nil, err
return nil, nil, 0, err
}
rows = append(rows, vals)
schema = it.Schema
totalRows = it.TotalRows
}
}

View File

@ -48,9 +48,14 @@ type RowIterator struct {
// is also set, StartIndex is ignored.
StartIndex uint64
rows [][]Value
// The schema of the table. Available after the first call to Next.
Schema Schema
schema Schema // populated on first call to fetch
// The total number of rows in the result. Available after the first call to Next.
// May be zero just after rows were inserted.
TotalRows uint64
rows [][]Value
structLoader structLoader // used to populate a pointer to a struct
}
@ -88,8 +93,11 @@ type RowIterator struct {
// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
// All calls to Next on the same iterator must use the same struct type.
//
// It is an error to attempt to read a BigQuery NULL value into a struct field.
// If your table contains NULLs, use a *[]Value or *map[string]Value.
// It is an error to attempt to read a BigQuery NULL value into a struct field,
// unless the field is of type []byte or is one of the special Null types: NullInt64,
// NullFloat64, NullBool, NullString, NullTimestamp, NullDate, NullTime or
// NullDateTime. You can also use a *[]Value or *map[string]Value to read from a
// table with NULLs.
func (it *RowIterator) Next(dst interface{}) error {
var vl ValueLoader
switch dst := dst.(type) {
@ -113,12 +121,12 @@ func (it *RowIterator) Next(dst interface{}) error {
if vl == nil {
// This can only happen if dst is a pointer to a struct. We couldn't
// set vl above because we need the schema.
if err := it.structLoader.set(dst, it.schema); err != nil {
if err := it.structLoader.set(dst, it.Schema); err != nil {
return err
}
vl = &it.structLoader
}
return vl.Load(row, it.schema)
return vl.Load(row, it.Schema)
}
func isStructPtr(x interface{}) bool {
@ -130,12 +138,13 @@ func isStructPtr(x interface{}) bool {
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
res, err := it.pf(it.ctx, it.table, it.schema, it.StartIndex, int64(pageSize), pageToken)
res, err := it.pf(it.ctx, it.table, it.Schema, it.StartIndex, int64(pageSize), pageToken)
if err != nil {
return "", err
}
it.rows = append(it.rows, res.rows...)
it.schema = res.schema
it.Schema = res.schema
it.TotalRows = res.totalRows
return res.pageToken, nil
}

View File

@ -64,6 +64,7 @@ func TestIterator(t *testing.T) {
want [][]Value
wantErr error
wantSchema Schema
wantTotalRows uint64
}{
{
desc: "Iteration over single empty page",
@ -87,11 +88,13 @@ func TestIterator(t *testing.T) {
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
totalRows: 4,
},
},
},
want: [][]Value{{1, 2}, {11, 12}},
wantSchema: iiSchema,
want: [][]Value{{1, 2}, {11, 12}},
wantSchema: iiSchema,
wantTotalRows: 4,
},
{
desc: "Iteration over single page with different schema",
@ -115,6 +118,7 @@ func TestIterator(t *testing.T) {
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
totalRows: 4,
},
},
"a": {
@ -122,11 +126,13 @@ func TestIterator(t *testing.T) {
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema,
totalRows: 4,
},
},
},
want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
wantSchema: iiSchema,
want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
wantSchema: iiSchema,
wantTotalRows: 4,
},
{
desc: "Server response includes empty page",
@ -240,7 +246,7 @@ func TestIterator(t *testing.T) {
}
it := newRowIterator(context.Background(), nil, pf.fetchPage)
it.PageInfo().Token = tc.pageToken
values, schema, err := consumeRowIterator(it)
values, schema, totalRows, err := consumeRowIterator(it)
if err != tc.wantErr {
t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
}
@ -250,35 +256,31 @@ func TestIterator(t *testing.T) {
if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
}
if totalRows != tc.wantTotalRows {
t.Errorf("%s: totalRows: got %d, want %d", tc.desc, totalRows, tc.wantTotalRows)
}
}
}
type valueListWithSchema struct {
vals valueList
schema Schema
}
func (v *valueListWithSchema) Load(vs []Value, s Schema) error {
v.vals.Load(vs, s)
v.schema = s
return nil
}
// consumeRowIterator reads the schema and all values from a RowIterator and returns them.
func consumeRowIterator(it *RowIterator) ([][]Value, Schema, error) {
var got [][]Value
var schema Schema
func consumeRowIterator(it *RowIterator) ([][]Value, Schema, uint64, error) {
var (
got [][]Value
schema Schema
totalRows uint64
)
for {
var vls valueListWithSchema
var vls []Value
err := it.Next(&vls)
if err == iterator.Done {
return got, schema, nil
return got, schema, totalRows, nil
}
if err != nil {
return got, schema, err
return got, schema, totalRows, err
}
got = append(got, vls.vals)
schema = vls.schema
got = append(got, vls)
schema = it.Schema
totalRows = it.TotalRows
}
}
@ -333,7 +335,7 @@ func TestNextAfterFinished(t *testing.T) {
}
it := newRowIterator(context.Background(), nil, pf.fetchPage)
values, _, err := consumeRowIterator(it)
values, _, _, err := consumeRowIterator(it)
if err != nil {
t.Fatal(err)
}
@ -355,7 +357,7 @@ func TestIteratorNextTypes(t *testing.T) {
struct{}{},
} {
if err := it.Next(v); err == nil {
t.Error("%v: want error, got nil", v)
t.Errorf("%v: want error, got nil", v)
}
}
}

View File

@ -271,7 +271,7 @@ func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, strin
}
dt := bqToTable(destTable, j.c)
it := newRowIterator(ctx, dt, pf)
it.schema = schema
it.Schema = schema
return it, nil
}

169
vendor/cloud.google.com/go/bigquery/nulls.go generated vendored Normal file
View File

@ -0,0 +1,169 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"encoding/json"
"fmt"
"reflect"
"time"
"cloud.google.com/go/civil"
)
// NullInt64 represents a BigQuery INT64 that may be NULL.
type NullInt64 struct {
	Int64 int64
	Valid bool // Valid is true if Int64 is not NULL.
}

// String returns "NULL" when the value is NULL, else the value's string form.
func (n NullInt64) String() string { return nullstr(n.Valid, n.Int64) }

// NullString represents a BigQuery STRING that may be NULL.
type NullString struct {
	StringVal string
	Valid     bool // Valid is true if StringVal is not NULL.
}

// String returns "NULL" when the value is NULL, else the value's string form.
func (n NullString) String() string { return nullstr(n.Valid, n.StringVal) }

// NullFloat64 represents a BigQuery FLOAT64 that may be NULL.
type NullFloat64 struct {
	Float64 float64
	Valid   bool // Valid is true if Float64 is not NULL.
}

// String returns "NULL" when the value is NULL, else the value's string form.
func (n NullFloat64) String() string { return nullstr(n.Valid, n.Float64) }

// NullBool represents a BigQuery BOOL that may be NULL.
type NullBool struct {
	Bool  bool
	Valid bool // Valid is true if Bool is not NULL.
}

// String returns "NULL" when the value is NULL, else the value's string form.
func (n NullBool) String() string { return nullstr(n.Valid, n.Bool) }
// NullTimestamp represents a BigQuery TIMESTAMP that may be null.
type NullTimestamp struct {
	Timestamp time.Time
	Valid     bool // Valid is true if Timestamp is not NULL.
}

// String returns "NULL" when the value is NULL, else the value's string form.
func (n NullTimestamp) String() string { return nullstr(n.Valid, n.Timestamp) }

// NullDate represents a BigQuery DATE that may be null.
type NullDate struct {
	Date  civil.Date
	Valid bool // Valid is true if Date is not NULL.
}

// String returns "NULL" when the value is NULL, else the value's string form.
func (n NullDate) String() string { return nullstr(n.Valid, n.Date) }
// NullTime represents a BigQuery TIME that may be null.
type NullTime struct {
	Time  civil.Time
	Valid bool // Valid is true if Time is not NULL.
}

// String returns "<null>" when the value is NULL. Note the placeholder
// differs from the "NULL" used by the other Null types, and the value is
// formatted with CivilTimeString rather than nullstr — presumably to match
// BigQuery's civil-time representation (defined elsewhere in this package).
func (n NullTime) String() string {
	if !n.Valid {
		return "<null>"
	}
	return CivilTimeString(n.Time)
}
// NullDateTime represents a BigQuery DATETIME that may be null.
type NullDateTime struct {
	DateTime civil.DateTime
	Valid    bool // Valid is true if DateTime is not NULL.
}

// String returns "<null>" when the value is NULL. Note the placeholder
// differs from the "NULL" used by the other Null types, and the value is
// formatted with CivilDateTimeString rather than nullstr — presumably to
// match BigQuery's civil-datetime representation (defined elsewhere in this
// package).
func (n NullDateTime) String() string {
	if !n.Valid {
		return "<null>"
	}
	return CivilDateTimeString(n.DateTime)
}
// MarshalJSON converts the NullInt64 to JSON.
func (n NullInt64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Int64) }

// MarshalJSON converts the NullFloat64 to JSON.
func (n NullFloat64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Float64) }

// MarshalJSON converts the NullBool to JSON.
func (n NullBool) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Bool) }

// MarshalJSON converts the NullString to JSON.
func (n NullString) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.StringVal) }

// MarshalJSON converts the NullTimestamp to JSON.
func (n NullTimestamp) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Timestamp) }

// MarshalJSON converts the NullDate to JSON.
func (n NullDate) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Date) }
// MarshalJSON converts the NullTime to JSON: a quoted civil-time string
// when valid, the JSON null literal otherwise.
func (n NullTime) MarshalJSON() ([]byte, error) {
	if n.Valid {
		return []byte(`"` + CivilTimeString(n.Time) + `"`), nil
	}
	return jsonNull, nil
}
// MarshalJSON converts the NullDateTime to JSON: a quoted civil-datetime
// string when valid, the JSON null literal otherwise.
func (n NullDateTime) MarshalJSON() ([]byte, error) {
	if n.Valid {
		return []byte(`"` + CivilDateTimeString(n.DateTime) + `"`), nil
	}
	return jsonNull, nil
}
// nullstr renders v with fmt.Sprint when valid is true, and returns the
// literal "NULL" otherwise. It backs the String methods of the NullXXX types.
func nullstr(valid bool, v interface{}) string {
	if valid {
		return fmt.Sprint(v)
	}
	return "NULL"
}
// jsonNull is the JSON encoding of a NULL value.
var jsonNull = []byte("null")

// nulljson marshals v to JSON when valid is true, and returns the JSON null
// literal otherwise. It backs the MarshalJSON methods of the NullXXX types.
func nulljson(valid bool, v interface{}) ([]byte, error) {
	if valid {
		return json.Marshal(v)
	}
	return jsonNull, nil
}
// Reflect types of the NullXXX wrappers, computed once so that schema
// inference and value loading can detect nullable fields with cheap
// reflect.Type comparisons.
var (
	typeOfNullInt64     = reflect.TypeOf(NullInt64{})
	typeOfNullFloat64   = reflect.TypeOf(NullFloat64{})
	typeOfNullBool      = reflect.TypeOf(NullBool{})
	typeOfNullString    = reflect.TypeOf(NullString{})
	typeOfNullTimestamp = reflect.TypeOf(NullTimestamp{})
	typeOfNullDate      = reflect.TypeOf(NullDate{})
	typeOfNullTime      = reflect.TypeOf(NullTime{})
	typeOfNullDateTime  = reflect.TypeOf(NullDateTime{})
)
// nullableFieldType returns the BigQuery field type corresponding to the
// NullXXX type t, or "" if t is not one of the NullXXX types.
func nullableFieldType(t reflect.Type) FieldType {
	for _, m := range [...]struct {
		typ reflect.Type
		ft  FieldType
	}{
		{typeOfNullInt64, IntegerFieldType},
		{typeOfNullFloat64, FloatFieldType},
		{typeOfNullBool, BooleanFieldType},
		{typeOfNullString, StringFieldType},
		{typeOfNullTimestamp, TimestampFieldType},
		{typeOfNullDate, DateFieldType},
		{typeOfNullTime, TimeFieldType},
		{typeOfNullDateTime, DateTimeFieldType},
	} {
		if t == m.typ {
			return m.ft
		}
	}
	return ""
}

53
vendor/cloud.google.com/go/bigquery/nulls_test.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"encoding/json"
"testing"
)
// TestNullsJSON verifies that each NullXXX type marshals to the expected
// JSON: the underlying value when Valid, and the JSON null literal otherwise.
func TestNullsJSON(t *testing.T) {
	cases := []struct {
		in   interface{}
		want string
	}{
		{NullInt64{Valid: true, Int64: 3}, `3`},
		{NullFloat64{Valid: true, Float64: 3.14}, `3.14`},
		{NullBool{Valid: true, Bool: true}, `true`},
		{NullString{Valid: true, StringVal: "foo"}, `"foo"`},
		{NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`},
		{NullDate{Valid: true, Date: testDate}, `"2016-11-05"`},
		{NullTime{Valid: true, Time: testTime}, `"07:50:22.000000"`},
		{NullDateTime{Valid: true, DateTime: testDateTime}, `"2016-11-05 07:50:22.000000"`},
		{NullInt64{}, `null`},
		{NullFloat64{}, `null`},
		{NullBool{}, `null`},
		{NullString{}, `null`},
		{NullTimestamp{}, `null`},
		{NullDate{}, `null`},
		{NullTime{}, `null`},
		{NullDateTime{}, `null`},
	}
	for _, tc := range cases {
		b, err := json.Marshal(tc.in)
		if err != nil {
			t.Fatal(err)
		}
		if got := string(b); got != tc.want {
			t.Errorf("%#v: got %s, want %s", tc.in, got, tc.want)
		}
	}
}

View File

@ -37,17 +37,24 @@ var (
validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)
const nullableTagOption = "nullable"
func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
if s := t.Get("bigquery"); s != "" {
if s == "-" {
return "", false, nil, nil
}
if !validFieldName.MatchString(s) {
return "", false, nil, errInvalidFieldName
}
return s, true, nil, nil
name, keep, opts, err := fields.ParseStandardTag("bigquery", t)
if err != nil {
return "", false, nil, err
}
return "", true, nil, nil
if name != "" && !validFieldName.MatchString(name) {
return "", false, nil, errInvalidFieldName
}
for _, opt := range opts {
if opt != nullableTagOption {
return "", false, nil, fmt.Errorf(
"bigquery: invalid tag option %q. The only valid option is %q",
opt, nullableTagOption)
}
}
return name, keep, opts, nil
}
var fieldCache = fields.NewCache(bqTagParser, nil, nil)

View File

@ -264,19 +264,18 @@ func TestConvertParamValue(t *testing.T) {
}
func TestIntegration_ScalarParam(t *testing.T) {
timeEqualMicrosec := cmp.Comparer(func(t1, t2 time.Time) bool {
return t1.Round(time.Microsecond).Equal(t2.Round(time.Microsecond))
})
roundToMicros := cmp.Transformer("RoundToMicros",
func(t time.Time) time.Time { return t.Round(time.Microsecond) })
c := getClient(t)
for _, test := range scalarTests {
gotData, gotParam, err := paramRoundTrip(c, test.val)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(gotData, test.val, timeEqualMicrosec) {
if !testutil.Equal(gotData, test.val, roundToMicros) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val)
}
if !testutil.Equal(gotParam, test.val, timeEqualMicrosec) {
if !testutil.Equal(gotParam, test.val, roundToMicros) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val)
}
}

View File

@ -100,6 +100,10 @@ type QueryConfig struct {
// It is illegal to mix positional and named syntax.
Parameters []QueryParameter
// TimePartitioning specifies time-based partitioning
// for the destination table.
TimePartitioning *TimePartitioning
// The labels associated with this job.
Labels map[string]string
@ -121,6 +125,7 @@ func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
AllowLargeResults: qc.AllowLargeResults,
Priority: string(qc.Priority),
MaximumBytesBilled: qc.MaxBytesBilled,
TimePartitioning: qc.TimePartitioning.toBQ(),
}
if len(qc.TableDefinitions) > 0 {
qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
@ -188,6 +193,7 @@ func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
MaxBytesBilled: qq.MaximumBytesBilled,
UseLegacySQL: qq.UseLegacySql,
UseStandardSQL: !qq.UseLegacySql,
TimePartitioning: bqToTimePartitioning(qq.TimePartitioning),
}
if len(qq.TableDefinitions) > 0 {
qc.TableDefinitions = make(map[string]ExternalData)

View File

@ -16,6 +16,7 @@ package bigquery
import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
@ -350,6 +351,7 @@ func TestConfiguringQuery(t *testing.T) {
query.JobID = "ajob"
query.DefaultProjectID = "def-project-id"
query.DefaultDatasetID = "def-dataset-id"
query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"}
// Note: Other configuration fields are tested in other tests above.
// A lot of that can be consolidated once Client.Copy is gone.
@ -361,8 +363,9 @@ func TestConfiguringQuery(t *testing.T) {
ProjectId: "def-project-id",
DatasetId: "def-dataset-id",
},
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
UseLegacySql: false,
TimePartitioning: &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"},
ForceSendFields: []string{"UseLegacySql"},
},
},
JobReference: &bq.JobReference{

View File

@ -122,25 +122,71 @@ var (
errNoStruct = errors.New("bigquery: can only infer schema from struct or pointer to struct")
errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
errInvalidFieldName = errors.New("bigquery: invalid name of field in struct")
errBadNullable = errors.New(`bigquery: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`)
)
var typeOfByteSlice = reflect.TypeOf([]byte{})
// InferSchema tries to derive a BigQuery schema from the supplied struct value.
// NOTE: All fields in the returned Schema are configured to be required,
// unless the corresponding field in the supplied struct is a slice or array.
// Each exported struct field is mapped to a field in the schema.
//
// It is considered an error if the struct (including nested structs) contains
// any exported fields that are pointers or one of the following types:
// uint, uint64, uintptr, map, interface, complex64, complex128, func, chan.
// In these cases, an error will be returned.
// Future versions may handle these cases without error.
// The following BigQuery types are inferred from the corresponding Go types.
// (This is the same mapping as that used for RowIterator.Next.) Fields inferred
// from these types are marked required (non-nullable).
//
// STRING string
// BOOL bool
// INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32
// FLOAT float32, float64
// BYTES []byte
// TIMESTAMP time.Time
// DATE civil.Date
// TIME civil.Time
// DATETIME civil.DateTime
//
// A Go slice or array type is inferred to be a BigQuery repeated field of the
// element type. The element type must be one of the above listed types.
//
// Nullable fields are inferred from the NullXXX types, declared in this package:
//
// STRING NullString
// BOOL NullBool
// INTEGER NullInt64
// FLOAT NullFloat64
// TIMESTAMP NullTimestamp
// DATE NullDate
// TIME NullTime
// DATETIME NullDateTime
// For a nullable BYTES field, use the type []byte and tag the field "nullable" (see below).
//
// A struct field that is of struct type is inferred to be a required field of type
// RECORD with a schema inferred recursively. For backwards compatibility, a field of
// type pointer to struct is also inferred to be required. To get a nullable RECORD
// field, use the "nullable" tag (see below).
//
// InferSchema returns an error if any of the examined fields is of type uint,
// uint64, uintptr, map, interface, complex64, complex128, func, or chan. Future
// versions may handle these cases without error.
//
// Recursively defined structs are also disallowed.
//
// Struct fields may be tagged in a way similar to the encoding/json package.
// A tag of the form
// bigquery:"name"
// uses "name" instead of the struct field name as the BigQuery field name.
// A tag of the form
// bigquery:"-"
// omits the field from the inferred schema.
// The "nullable" option marks the field as nullable (not required). It is only
// needed for []byte and pointer-to-struct fields, and cannot appear on other
// fields. In this example, the Go name of the field is retained:
// bigquery:",nullable"
func InferSchema(st interface{}) (Schema, error) {
return inferSchemaReflectCached(reflect.TypeOf(st))
}
// TODO(jba): replace with sync.Map for Go 1.9.
var schemaCache atomiccache.Cache
type cacheVal struct {
@ -184,10 +230,14 @@ func inferStruct(t reflect.Type) (Schema, error) {
}
// inferFieldSchema infers the FieldSchema for a Go type
func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
// Only []byte and struct pointers can be tagged nullable.
if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
return nil, errBadNullable
}
switch rt {
case typeOfByteSlice:
return &FieldSchema{Required: true, Type: BytesFieldType}, nil
return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
case typeOfGoTime:
return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
case typeOfDate:
@ -197,7 +247,10 @@ func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
case typeOfDateTime:
return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
}
if isSupportedIntType(rt) {
if ft := nullableFieldType(rt); ft != "" {
return &FieldSchema{Required: false, Type: ft}, nil
}
if isSupportedIntType(rt) || isSupportedUintType(rt) {
return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
}
switch rt.Kind() {
@ -207,26 +260,34 @@ func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
// Multi dimensional slices/arrays are not supported by BigQuery
return nil, errUnsupportedFieldType
}
f, err := inferFieldSchema(et)
if nullableFieldType(et) != "" {
// Repeated nullable types are not supported by BigQuery.
return nil, errUnsupportedFieldType
}
f, err := inferFieldSchema(et, false)
if err != nil {
return nil, err
}
f.Repeated = true
f.Required = false
return f, nil
case reflect.Struct, reflect.Ptr:
case reflect.Ptr:
if rt.Elem().Kind() != reflect.Struct {
return nil, errUnsupportedFieldType
}
fallthrough
case reflect.Struct:
nested, err := inferStruct(rt)
if err != nil {
return nil, err
}
return &FieldSchema{Required: true, Type: RecordFieldType, Schema: nested}, nil
return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
case reflect.String:
return &FieldSchema{Required: true, Type: StringFieldType}, nil
return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
case reflect.Bool:
return &FieldSchema{Required: true, Type: BooleanFieldType}, nil
return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
case reflect.Float32, reflect.Float64:
return &FieldSchema{Required: true, Type: FloatFieldType}, nil
return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
default:
return nil, errUnsupportedFieldType
}
@ -240,7 +301,14 @@ func inferFields(rt reflect.Type) (Schema, error) {
return nil, err
}
for _, field := range fields {
f, err := inferFieldSchema(field.Type)
var nullable bool
for _, opt := range field.ParsedTag.([]string) {
if opt == nullableTagOption {
nullable = true
break
}
}
f, err := inferFieldSchema(field.Type, nullable)
if err != nil {
return nil, err
}
@ -250,12 +318,22 @@ func inferFields(rt reflect.Type) (Schema, error) {
return s, nil
}
// isSupportedIntType reports whether t can be properly represented by the
// BigQuery INTEGER/INT64 type.
// isSupportedIntType reports whether t is an int type that can be properly
// represented by the BigQuery INTEGER/INT64 type.
func isSupportedIntType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int,
reflect.Uint8, reflect.Uint16, reflect.Uint32:
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
default:
return false
}
}
// isSupportedIntType reports whether t is a uint type that can be properly
// represented by the BigQuery INTEGER/INT64 type.
func isSupportedUintType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Uint8, reflect.Uint16, reflect.Uint32:
return true
default:
return false

View File

@ -248,6 +248,14 @@ func reqField(name, typ string) *FieldSchema {
}
}
func optField(name, typ string) *FieldSchema {
return &FieldSchema{
Name: name,
Type: FieldType(typ),
Required: false,
}
}
func TestSimpleInference(t *testing.T) {
testCases := []struct {
in interface{}
@ -491,6 +499,37 @@ func TestRepeatedInference(t *testing.T) {
}
}
type allNulls struct {
A NullInt64
B NullFloat64
C NullBool
D NullString
E NullTimestamp
F NullTime
G NullDate
H NullDateTime
}
func TestNullInference(t *testing.T) {
got, err := InferSchema(allNulls{})
if err != nil {
t.Fatal(err)
}
want := Schema{
optField("A", "INTEGER"),
optField("B", "FLOAT"),
optField("C", "BOOLEAN"),
optField("D", "STRING"),
optField("E", "TIMESTAMP"),
optField("F", "TIME"),
optField("G", "DATE"),
optField("H", "DATETIME"),
}
if diff := testutil.Diff(got, want); diff != "" {
t.Error(diff)
}
}
type Embedded struct {
Embedded int
}
@ -532,10 +571,11 @@ func TestRecursiveInference(t *testing.T) {
type withTags struct {
NoTag int
ExcludeTag int `bigquery:"-"`
SimpleTag int `bigquery:"simple_tag"`
UnderscoreTag int `bigquery:"_id"`
MixedCase int `bigquery:"MIXEDcase"`
ExcludeTag int `bigquery:"-"`
SimpleTag int `bigquery:"simple_tag"`
UnderscoreTag int `bigquery:"_id"`
MixedCase int `bigquery:"MIXEDcase"`
Nullable []byte `bigquery:",nullable"`
}
type withTagsNested struct {
@ -544,6 +584,8 @@ type withTagsNested struct {
ExcludeTag int `bigquery:"-"`
Inside int `bigquery:"inside"`
} `bigquery:"anon"`
PNested *struct{ X int } // not nullable, for backwards compatibility
PNestedNullable *struct{ X int } `bigquery:",nullable"`
}
type withTagsRepeated struct {
@ -563,6 +605,7 @@ var withTagsSchema = Schema{
reqField("simple_tag", "INTEGER"),
reqField("_id", "INTEGER"),
reqField("MIXEDcase", "INTEGER"),
optField("Nullable", "BYTES"),
}
func TestTagInference(t *testing.T) {
@ -589,6 +632,18 @@ func TestTagInference(t *testing.T) {
Type: "RECORD",
Schema: Schema{reqField("inside", "INTEGER")},
},
&FieldSchema{
Name: "PNested",
Required: true,
Type: "RECORD",
Schema: Schema{reqField("X", "INTEGER")},
},
&FieldSchema{
Name: "PNestedNullable",
Required: false,
Type: "RECORD",
Schema: Schema{reqField("X", "INTEGER")},
},
},
},
{
@ -666,12 +721,6 @@ func TestTagInferenceErrors(t *testing.T) {
}{},
err: errInvalidFieldName,
},
{
in: struct {
OmitEmpty int `bigquery:"abc,omitempty"`
}{},
err: errInvalidFieldName,
},
}
for i, tc := range testCases {
want := tc.err
@ -680,6 +729,13 @@ func TestTagInferenceErrors(t *testing.T) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
}
}
_, err := InferSchema(struct {
X int `bigquery:",optional"`
}{})
if err == nil {
t.Error("got nil, want error")
}
}
func TestSchemaErrors(t *testing.T) {
@ -721,7 +777,7 @@ func TestSchemaErrors(t *testing.T) {
},
{
in: struct{ Ptr *int }{},
err: errNoStruct,
err: errUnsupportedFieldType,
},
{
in: struct{ Interface interface{} }{},
@ -735,6 +791,14 @@ func TestSchemaErrors(t *testing.T) {
in: struct{ MultiDimensional [][][]byte }{},
err: errUnsupportedFieldType,
},
{
in: struct{ SliceOfPointer []*int }{},
err: errUnsupportedFieldType,
},
{
in: struct{ SliceOfNull []NullInt64 }{},
err: errUnsupportedFieldType,
},
{
in: struct{ ChanSlice []chan bool }{},
err: errUnsupportedFieldType,
@ -743,6 +807,42 @@ func TestSchemaErrors(t *testing.T) {
in: struct{ NestedChan struct{ Chan []chan bool } }{},
err: errUnsupportedFieldType,
},
{
in: struct {
X int `bigquery:",nullable"`
}{},
err: errBadNullable,
},
{
in: struct {
X bool `bigquery:",nullable"`
}{},
err: errBadNullable,
},
{
in: struct {
X struct{ N int } `bigquery:",nullable"`
}{},
err: errBadNullable,
},
{
in: struct {
X []int `bigquery:",nullable"`
}{},
err: errBadNullable,
},
{
in: struct{ X *[]byte }{},
err: errUnsupportedFieldType,
},
{
in: struct{ X *[]int }{},
err: errUnsupportedFieldType,
},
{
in: struct{ X *int }{},
err: errUnsupportedFieldType,
},
}
for _, tc := range testCases {
want := tc.err

View File

@ -147,6 +147,11 @@ type TimePartitioning struct {
// The amount of time to keep the storage for a partition.
// If the duration is empty (0), the data in the partitions do not expire.
Expiration time.Duration
// If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the
// table is partitioned by this field. The field must be a top-level TIMESTAMP or
// DATE field. Its mode must be NULLABLE or REQUIRED.
Field string
}
func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
@ -156,6 +161,7 @@ func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
return &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: int64(p.Expiration / time.Millisecond),
Field: p.Field,
}
}
@ -165,6 +171,7 @@ func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
}
return &TimePartitioning{
Expiration: time.Duration(q.ExpirationMs) * time.Millisecond,
Field: q.Field,
}
}

View File

@ -51,6 +51,7 @@ func TestBQToTableMetadata(t *testing.T) {
TimePartitioning: &bq.TimePartitioning{
ExpirationMs: 7890,
Type: "DAY",
Field: "pfield",
},
Type: "EXTERNAL",
View: &bq.ViewDefinition{Query: "view-query"},
@ -72,7 +73,10 @@ func TestBQToTableMetadata(t *testing.T) {
LastModifiedTime: aTime.Truncate(time.Millisecond),
NumBytes: 123,
NumRows: 7,
TimePartitioning: &TimePartitioning{Expiration: 7890 * time.Millisecond},
TimePartitioning: &TimePartitioning{
Expiration: 7890 * time.Millisecond,
Field: "pfield",
},
StreamingBuffer: &StreamingBuffer{
EstimatedBytes: 11,
EstimatedRows: 3,
@ -154,9 +158,12 @@ func TestTableMetadataToBQ(t *testing.T) {
},
{
&TableMetadata{
ViewQuery: "q",
UseStandardSQL: true,
TimePartitioning: &TimePartitioning{time.Second},
ViewQuery: "q",
UseStandardSQL: true,
TimePartitioning: &TimePartitioning{
Expiration: time.Second,
Field: "ofDreams",
},
},
&bq.Table{
View: &bq.ViewDefinition{
@ -167,6 +174,7 @@ func TestTableMetadataToBQ(t *testing.T) {
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 1000,
Field: "ofDreams",
},
},
},

View File

@ -54,6 +54,9 @@ type Uploader struct {
// Uploader returns an Uploader that can be used to append rows to t.
// The returned Uploader may optionally be further configured before its Put method is called.
//
// To stream rows into a date-partitioned table at a particular date, add the
// $yyyymmdd suffix to the table name when constructing the Table.
func (t *Table) Uploader() *Uploader {
return &Uploader{t: t}
}
@ -156,6 +159,9 @@ func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
if err != nil {
return err
}
if req == nil {
return nil
}
call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
call = call.Context(ctx)
setClientHeader(call.Header())
@ -171,6 +177,9 @@ func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
}
func (u *Uploader) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
if savers == nil { // If there are no rows, do nothing.
return nil, nil
}
req := &bq.TableDataInsertAllRequest{
TemplateSuffix: u.TableTemplateSuffix,
IgnoreUnknownValues: u.IgnoreUnknownValues,

View File

@ -48,8 +48,9 @@ func TestNewInsertRequest(t *testing.T) {
req *bq.TableDataInsertAllRequest
}{
{
ul: &Uploader{},
req: &bq.TableDataInsertAllRequest{},
ul: &Uploader{},
savers: nil,
req: nil,
},
{
ul: &Uploader{},
@ -92,7 +93,7 @@ func TestNewInsertRequest(t *testing.T) {
}
want := tc.req
if !testutil.Equal(got, want) {
t.Errorf("%#d: %#v: got %#v, want %#v", i, tc.ul, got, want)
t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want)
}
}
}
@ -155,6 +156,8 @@ func TestValueSavers(t *testing.T) {
in interface{}
want []ValueSaver
}{
{[]interface{}(nil), nil},
{[]interface{}{}, nil},
{ts, []ValueSaver{ts}},
{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
@ -191,6 +194,7 @@ func TestValueSavers(t *testing.T) {
func TestValueSaversErrors(t *testing.T) {
inputs := []interface{}{
nil,
1,
[]int{1, 2},
[]interface{}{

View File

@ -18,6 +18,7 @@ import (
"encoding/base64"
"errors"
"fmt"
"math"
"reflect"
"strconv"
"time"
@ -63,6 +64,8 @@ func loadMap(m map[string]Value, vals []Value, s Schema) {
val := vals[i]
var v interface{}
switch {
case val == nil:
v = val
case f.Schema == nil:
v = val
case !f.Repeated:
@ -79,6 +82,7 @@ func loadMap(m map[string]Value, vals []Value, s Schema) {
}
v = vs
}
m[f.Name] = v
}
}
@ -125,6 +129,18 @@ func setInt(v reflect.Value, x interface{}) error {
return nil
}
func setUint(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
}
xx := x.(int64)
if xx < 0 || v.OverflowUint(uint64(xx)) {
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
}
v.SetUint(uint64(xx))
return nil
}
func setFloat(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
@ -155,9 +171,20 @@ func setString(v reflect.Value, x interface{}) error {
func setBytes(v reflect.Value, x interface{}) error {
if x == nil {
return errNoNulls
v.SetBytes(nil)
} else {
v.SetBytes(x.([]byte))
}
return nil
}
func setNull(v reflect.Value, x interface{}, build func() interface{}) error {
if x == nil {
v.Set(reflect.Zero(v.Type()))
} else {
n := build()
v.Set(reflect.ValueOf(n))
}
v.SetBytes(x.([]byte))
return nil
}
@ -228,7 +255,7 @@ func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, err
return nil, err
}
op.setFunc = func(v reflect.Value, val interface{}) error {
return setNested(nested, v, val.([]Value))
return setNested(nested, v, val)
}
} else {
op.setFunc = determineSetFunc(t, schemaField.Type)
@ -253,6 +280,13 @@ func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
if ftype.Kind() == reflect.String {
return setString
}
if ftype == typeOfNullString {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullString{StringVal: x.(string), Valid: true}
})
}
}
case BytesFieldType:
if ftype == typeOfByteSlice {
@ -260,40 +294,91 @@ func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
}
case IntegerFieldType:
if isSupportedIntType(ftype) {
if isSupportedUintType(ftype) {
return setUint
} else if isSupportedIntType(ftype) {
return setInt
}
if ftype == typeOfNullInt64 {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullInt64{Int64: x.(int64), Valid: true}
})
}
}
case FloatFieldType:
switch ftype.Kind() {
case reflect.Float32, reflect.Float64:
return setFloat
}
if ftype == typeOfNullFloat64 {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullFloat64{Float64: x.(float64), Valid: true}
})
}
}
case BooleanFieldType:
if ftype.Kind() == reflect.Bool {
return setBool
}
if ftype == typeOfNullBool {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullBool{Bool: x.(bool), Valid: true}
})
}
}
case TimestampFieldType:
if ftype == typeOfGoTime {
return setAny
}
if ftype == typeOfNullTimestamp {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullTimestamp{Timestamp: x.(time.Time), Valid: true}
})
}
}
case DateFieldType:
if ftype == typeOfDate {
return setAny
}
if ftype == typeOfNullDate {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullDate{Date: x.(civil.Date), Valid: true}
})
}
}
case TimeFieldType:
if ftype == typeOfTime {
return setAny
}
if ftype == typeOfNullTime {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullTime{Time: x.(civil.Time), Valid: true}
})
}
}
case DateTimeFieldType:
if ftype == typeOfDateTime {
return setAny
}
if ftype == typeOfNullDateTime {
return func(v reflect.Value, x interface{}) error {
return setNull(v, x, func() interface{} {
return NullDateTime{DateTime: x.(civil.DateTime), Valid: true}
})
}
}
}
return nil
}
@ -323,16 +408,21 @@ func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error {
return nil
}
func setNested(ops []structLoaderOp, v reflect.Value, vals []Value) error {
func setNested(ops []structLoaderOp, v reflect.Value, val interface{}) error {
// v is either a struct or a pointer to a struct.
if v.Kind() == reflect.Ptr {
// If the value is nil, set the pointer to nil.
if val == nil {
v.Set(reflect.Zero(v.Type()))
return nil
}
// If the pointer is nil, set it to a zero struct value.
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
return runOps(ops, v, vals)
return runOps(ops, v, val.([]Value))
}
func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error {
@ -404,6 +494,10 @@ func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
m := make(map[string]Value)
for i, fieldSchema := range schema {
if vs[i] == nil {
m[fieldSchema.Name] = nil
continue
}
if fieldSchema.Type != RecordFieldType {
m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema)
continue
@ -550,10 +644,16 @@ func toUploadValue(val interface{}, fs *FieldSchema) interface{} {
func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
switch fs.Type {
case TimeFieldType:
if v.Type() == typeOfNullTime {
return v.Interface()
}
return civilToUploadValue(v, fs, func(v reflect.Value) string {
return CivilTimeString(v.Interface().(civil.Time))
})
case DateTimeFieldType:
if v.Type() == typeOfNullDateTime {
return v.Interface()
}
return civilToUploadValue(v, fs, func(v reflect.Value) string {
return CivilDateTimeString(v.Interface().(civil.DateTime))
})
@ -705,7 +805,12 @@ func convertBasicType(val string, typ FieldType) (Value, error) {
return strconv.ParseBool(val)
case TimestampFieldType:
f, err := strconv.ParseFloat(val, 64)
return Value(time.Unix(0, int64(f*1e9)).UTC()), err
if err != nil {
return nil, err
}
secs := math.Trunc(f)
nanos := (f - secs) * 1e9
return Value(time.Unix(int64(secs), int64(nanos)).UTC()), nil
case DateFieldType:
return civil.ParseDate(val)
case TimeFieldType:

View File

@ -24,7 +24,6 @@ import (
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
@ -89,6 +88,20 @@ func TestConvertTime(t *testing.T) {
}
}
// TestConvertSmallTimes checks that convertBasicType round-trips timestamps
// far in the past (years 1600, 1066, and 1) when the value arrives as a
// fractional-seconds string (the TimestampFieldType case of convertBasicType).
func TestConvertSmallTimes(t *testing.T) {
	for _, year := range []int{1600, 1066, 1} {
		want := time.Date(year, time.January, 1, 0, 0, 0, 0, time.UTC)
		// Encode the instant as seconds with ten fractional digits.
		s := fmt.Sprintf("%.10f", float64(want.Unix()))
		got, err := convertBasicType(s, TimestampFieldType)
		if err != nil {
			t.Fatal(err)
		}
		// Compare with Equal, not ==, per time.Time convention.
		if !got.(time.Time).Equal(want) {
			t.Errorf("got %v, want %v", got, want)
		}
	}
}
func TestConvertNullValues(t *testing.T) {
schema := []*FieldSchema{
{Type: StringFieldType},
@ -512,6 +525,7 @@ func TestStructSaver(t *testing.T) {
{Name: "rnested", Type: RecordFieldType, Repeated: true, Schema: Schema{
{Name: "b", Type: BooleanFieldType},
}},
{Name: "p", Type: IntegerFieldType, Required: false},
}
type (
@ -523,6 +537,7 @@ func TestStructSaver(t *testing.T) {
TR []civil.Time
Nested *N
Rnested []*N
P NullInt64
}
)
@ -539,10 +554,11 @@ func TestStructSaver(t *testing.T) {
if wantIID := "iid"; gotIID != wantIID {
t.Errorf("%s: InsertID: got %q, want %q", msg, gotIID, wantIID)
}
if !testutil.Equal(got, want) {
t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want)
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("%s: %s", msg, diff)
}
}
ct1 := civil.Time{1, 2, 3, 4000}
ct2 := civil.Time{5, 6, 7, 8000}
in := T{
@ -552,6 +568,7 @@ func TestStructSaver(t *testing.T) {
TR: []civil.Time{ct1, ct2},
Nested: &N{B: true},
Rnested: []*N{{true}, {false}},
P: NullInt64{Valid: true, Int64: 17},
}
want := map[string]Value{
"s": "x",
@ -560,10 +577,11 @@ func TestStructSaver(t *testing.T) {
"tr": []string{"01:02:03.000004", "05:06:07.000008"},
"nested": map[string]Value{"b": true},
"rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}},
"p": NullInt64{Valid: true, Int64: 17},
}
check("all values", in, want)
check("all values, ptr", &in, want)
check("empty struct", T{}, map[string]Value{"s": "", "t": "00:00:00"})
check("empty struct", T{}, map[string]Value{"s": "", "t": "00:00:00", "p": NullInt64{}})
// Missing and extra fields ignored.
type T2 struct {
@ -577,6 +595,7 @@ func TestStructSaver(t *testing.T) {
map[string]Value{
"s": "",
"t": "00:00:00",
"p": NullInt64{},
"rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}},
})
}
@ -713,6 +732,22 @@ func TestValueMap(t *testing.T) {
t.Errorf("got\n%+v\nwant\n%+v", vm, want)
}
in = make([]Value, len(schema))
want = map[string]Value{
"s": nil,
"i": nil,
"f": nil,
"b": nil,
"n": nil,
"rn": nil,
}
var vm2 valueMap
if err := vm2.Load(in, schema); err != nil {
t.Fatal(err)
}
if !testutil.Equal(vm2, valueMap(want)) {
t.Errorf("got\n%+v\nwant\n%+v", vm2, want)
}
}
var (
@ -722,6 +757,7 @@ var (
{Name: "s2", Type: StringFieldType},
{Name: "by", Type: BytesFieldType},
{Name: "I", Type: IntegerFieldType},
{Name: "U", Type: IntegerFieldType},
{Name: "F", Type: FloatFieldType},
{Name: "B", Type: BooleanFieldType},
{Name: "TS", Type: TimestampFieldType},
@ -740,7 +776,7 @@ var (
testTime = civil.Time{7, 50, 22, 8}
testDateTime = civil.DateTime{testDate, testTime}
testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), 3.14, true,
testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), int64(8), 3.14, true,
testTimestamp, testDate, testTime, testDateTime,
[]Value{"nested", int64(17)}, "z"}
)
@ -748,6 +784,7 @@ var (
type testStruct1 struct {
B bool
I int
U uint16
times
S string
S2 String
@ -774,14 +811,13 @@ type times struct {
func TestStructLoader(t *testing.T) {
var ts1 testStruct1
if err := load(&ts1, schema2, testValues); err != nil {
t.Fatal(err)
}
mustLoad(t, &ts1, schema2, testValues)
// Note: the schema field named "s" gets matched to the exported struct
// field "S", not the unexported "s".
want := &testStruct1{
B: true,
I: 7,
U: 8,
F: 3.14,
times: times{TS: testTimestamp, T: testTime, D: testDate, DT: testDateTime},
S: "x",
@ -790,33 +826,25 @@ func TestStructLoader(t *testing.T) {
Nested: nested{NestS: "nested", NestI: 17},
Tagged: "z",
}
if !testutil.Equal(&ts1, want, cmp.AllowUnexported(testStruct1{})) {
t.Errorf("got %+v, want %+v", pretty.Value(ts1), pretty.Value(*want))
d, _, err := pretty.Diff(*want, ts1)
if err == nil {
t.Logf("diff:\n%s", d)
}
if diff := testutil.Diff(&ts1, want, cmp.AllowUnexported(testStruct1{})); diff != "" {
t.Error(diff)
}
// Test pointers to nested structs.
type nestedPtr struct{ Nested *nested }
var np nestedPtr
if err := load(&np, schema2, testValues); err != nil {
t.Fatal(err)
}
mustLoad(t, &np, schema2, testValues)
want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}}
if !testutil.Equal(&np, want2) {
t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
if diff := testutil.Diff(&np, want2); diff != "" {
t.Error(diff)
}
// Existing values should be reused.
nst := &nested{NestS: "x", NestI: -10}
np = nestedPtr{Nested: nst}
if err := load(&np, schema2, testValues); err != nil {
t.Fatal(err)
}
if !testutil.Equal(&np, want2) {
t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
mustLoad(t, &np, schema2, testValues)
if diff := testutil.Diff(&np, want2); diff != "" {
t.Error(diff)
}
if np.Nested != nst {
t.Error("nested struct pointers not equal")
@ -851,28 +879,23 @@ var (
func TestStructLoaderRepeated(t *testing.T) {
var r1 repStruct
if err := load(&r1, repSchema, repValues); err != nil {
t.Fatal(err)
}
mustLoad(t, &r1, repSchema, repValues)
want := repStruct{
Nums: []int{1, 2, 3},
ShortNums: [...]int{1, 2}, // extra values discarded
LongNums: [...]int{1, 2, 3, 0, 0},
Nested: []*nested{{"x", 1}, {"y", 2}},
}
if !testutil.Equal(r1, want) {
t.Errorf("got %+v, want %+v", pretty.Value(r1), pretty.Value(want))
if diff := testutil.Diff(r1, want); diff != "" {
t.Error(diff)
}
r2 := repStruct{
Nums: []int{-1, -2, -3, -4, -5}, // truncated to zero and appended to
LongNums: [...]int{-1, -2, -3, -4, -5}, // unset elements are zeroed
}
if err := load(&r2, repSchema, repValues); err != nil {
t.Fatal(err)
}
if !testutil.Equal(r2, want) {
t.Errorf("got %+v, want %+v", pretty.Value(r2), pretty.Value(want))
mustLoad(t, &r2, repSchema, repValues)
if diff := testutil.Diff(r2, want); diff != "" {
t.Error(diff)
}
if got, want := cap(r2.Nums), 5; got != want {
t.Errorf("cap(r2.Nums) = %d, want %d", got, want)
@ -880,33 +903,109 @@ func TestStructLoaderRepeated(t *testing.T) {
// Short slice case.
r3 := repStruct{Nums: []int{-1}}
if err := load(&r3, repSchema, repValues); err != nil {
t.Fatal(err)
}
if !testutil.Equal(r3, want) {
t.Errorf("got %+v, want %+v", pretty.Value(r3), pretty.Value(want))
mustLoad(t, &r3, repSchema, repValues)
if diff := testutil.Diff(r3, want); diff != "" {
t.Error(diff)
}
if got, want := cap(r3.Nums), 3; got != want {
t.Errorf("cap(r3.Nums) = %d, want %d", got, want)
}
}
type testStructNullable struct {
String NullString
Bytes []byte
Integer NullInt64
Float NullFloat64
Boolean NullBool
Timestamp NullTimestamp
Date NullDate
Time NullTime
DateTime NullDateTime
Record *subNullable
}
type subNullable struct {
X NullInt64
}
var testStructNullableSchema = Schema{
{Name: "String", Type: StringFieldType, Required: false},
{Name: "Bytes", Type: BytesFieldType, Required: false},
{Name: "Integer", Type: IntegerFieldType, Required: false},
{Name: "Float", Type: FloatFieldType, Required: false},
{Name: "Boolean", Type: BooleanFieldType, Required: false},
{Name: "Timestamp", Type: TimestampFieldType, Required: false},
{Name: "Date", Type: DateFieldType, Required: false},
{Name: "Time", Type: TimeFieldType, Required: false},
{Name: "DateTime", Type: DateTimeFieldType, Required: false},
{Name: "Record", Type: RecordFieldType, Required: false, Schema: Schema{
{Name: "X", Type: IntegerFieldType, Required: false},
}},
}
func TestStructLoaderNullable(t *testing.T) {
var ts testStructNullable
nilVals := []Value{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil}
mustLoad(t, &ts, testStructNullableSchema, nilVals)
want := testStructNullable{}
if diff := testutil.Diff(ts, want); diff != "" {
t.Error(diff)
}
nonnilVals := []Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, testTime, testDateTime, []Value{int64(4)}}
// All ts fields are nil. Loading non-nil values will cause them all to
// be allocated.
mustLoad(t, &ts, testStructNullableSchema, nonnilVals)
want = testStructNullable{
String: NullString{StringVal: "x", Valid: true},
Bytes: []byte{1, 2, 3},
Integer: NullInt64{Int64: 1, Valid: true},
Float: NullFloat64{Float64: 2.3, Valid: true},
Boolean: NullBool{Bool: true, Valid: true},
Timestamp: NullTimestamp{Timestamp: testTimestamp, Valid: true},
Date: NullDate{Date: testDate, Valid: true},
Time: NullTime{Time: testTime, Valid: true},
DateTime: NullDateTime{DateTime: testDateTime, Valid: true},
Record: &subNullable{X: NullInt64{Int64: 4, Valid: true}},
}
if diff := testutil.Diff(ts, want); diff != "" {
t.Error(diff)
}
// Struct pointers are reused, byte slices are not.
want = ts
want.Bytes = []byte{17}
vals2 := []Value{nil, []byte{17}, nil, nil, nil, nil, nil, nil, nil, []Value{int64(7)}}
mustLoad(t, &ts, testStructNullableSchema, vals2)
if ts.Record != want.Record {
t.Error("record pointers not identical")
}
}
func TestStructLoaderOverflow(t *testing.T) {
type S struct {
I int16
U uint16
F float32
}
schema := Schema{
{Name: "I", Type: IntegerFieldType},
{Name: "U", Type: IntegerFieldType},
{Name: "F", Type: FloatFieldType},
}
var s S
if err := load(&s, schema, []Value{int64(math.MaxInt16 + 1), 0}); err == nil {
t.Error("int: got nil, want error")
}
if err := load(&s, schema, []Value{int64(0), math.MaxFloat32 * 2}); err == nil {
t.Error("float: got nil, want error")
z64 := int64(0)
for _, vals := range [][]Value{
{int64(math.MaxInt16 + 1), z64, 0},
{z64, int64(math.MaxInt32), 0},
{z64, int64(-1), 0},
{z64, z64, math.MaxFloat32 * 2},
} {
if err := load(&s, schema, vals); err == nil {
t.Errorf("%+v: got nil, want error", vals)
}
}
}
@ -922,20 +1021,18 @@ func TestStructLoaderFieldOverlap(t *testing.T) {
t.Fatal(err)
}
want1 := S1{I: 7}
if !testutil.Equal(s1, want1) {
t.Errorf("got %+v, want %+v", pretty.Value(s1), pretty.Value(want1))
if diff := testutil.Diff(s1, want1); diff != "" {
t.Error(diff)
}
// It's even valid to have no overlapping fields at all.
type S2 struct{ Z int }
var s2 S2
if err := load(&s2, schema2, testValues); err != nil {
t.Fatal(err)
}
mustLoad(t, &s2, schema2, testValues)
want2 := S2{}
if !testutil.Equal(s2, want2) {
t.Errorf("got %+v, want %+v", pretty.Value(s2), pretty.Value(want2))
if diff := testutil.Diff(s2, want2); diff != "" {
t.Error(diff)
}
}
@ -989,21 +1086,17 @@ func TestStructLoaderErrors(t *testing.T) {
{Name: "f", Type: FloatFieldType},
{Name: "b", Type: BooleanFieldType},
{Name: "s", Type: StringFieldType},
{Name: "by", Type: BytesFieldType},
{Name: "d", Type: DateFieldType},
}
type s struct {
I int
F float64
B bool
S string
By []byte
D civil.Date
}
vals := []Value{int64(0), 0.0, false, "", []byte{}, testDate}
if err := load(&s{}, schema, vals); err != nil {
t.Fatal(err)
I int
F float64
B bool
S string
D civil.Date
}
vals := []Value{int64(0), 0.0, false, "", testDate}
mustLoad(t, &s{}, schema, vals)
for i, e := range vals {
vals[i] = nil
got := load(&s{}, schema, vals)
@ -1033,6 +1126,12 @@ func TestStructLoaderErrors(t *testing.T) {
}
}
// mustLoad loads vals into pval according to schema and fails the test
// immediately on error. Test helper for cases that only exercise the
// success path of load.
func mustLoad(t *testing.T, pval interface{}, schema Schema, vals []Value) {
	if err := load(pval, schema, vals); err != nil {
		t.Fatalf("loading: %v", err)
	}
}
func load(pval interface{}, schema Schema, vals []Value) error {
var sl structLoader
if err := sl.set(pval, schema); err != nil {

View File

@ -18,28 +18,36 @@ package bigtable
import (
"fmt"
"math"
"regexp"
"strings"
"time"
"cloud.google.com/go/bigtable/internal/gax"
btopt "cloud.google.com/go/bigtable/internal/option"
"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
"github.com/golang/protobuf/ptypes"
durpb "github.com/golang/protobuf/ptypes/duration"
"golang.org/x/net/context"
"google.golang.org/api/cloudresourcemanager/v1"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
gtransport "google.golang.org/api/transport/grpc"
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/grpc/codes"
)
const adminAddr = "bigtableadmin.googleapis.com:443"
// AdminClient is a client type for performing admin operations within a specific instance.
type AdminClient struct {
conn *grpc.ClientConn
tClient btapb.BigtableTableAdminClient
conn *grpc.ClientConn
tClient btapb.BigtableTableAdminClient
lroClient *lroauto.OperationsClient
project, instance string
@ -53,17 +61,32 @@ func NewAdminClient(ctx context.Context, project, instance string, opts ...optio
if err != nil {
return nil, err
}
// Need to add scopes for long running operations (for create table & snapshots)
o = append(o, option.WithScopes(cloudresourcemanager.CloudPlatformScope))
o = append(o, opts...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
if err != nil {
// This error "should not happen", since we are just reusing old connection
// and never actually need to dial.
// If this does happen, we could leak conn. However, we cannot close conn:
// If the user invoked the function with option.WithGRPCConn,
// we would close a connection that's still in use.
// TODO(pongad): investigate error conditions.
return nil, err
}
return &AdminClient{
conn: conn,
tClient: btapb.NewBigtableTableAdminClient(conn),
project: project,
instance: instance,
md: metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)),
conn: conn,
tClient: btapb.NewBigtableTableAdminClient(conn),
lroClient: lroClient,
project: project,
instance: instance,
md: metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)),
}, nil
}
@ -245,6 +268,283 @@ func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix str
return err
}
// CreateTableFromSnapshot creates a table from snapshot.
// The table will be created in the same cluster as the snapshot.
// It blocks until the underlying long-running operation completes.
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) CreateTableFromSnapshot(ctx context.Context, table, cluster, snapshot string) error {
	ctx = mergeOutgoingMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	snapshotPath := prefix + "/clusters/" + cluster + "/snapshots/" + snapshot

	req := &btapb.CreateTableFromSnapshotRequest{
		Parent:         prefix,
		TableId:        table,
		SourceSnapshot: snapshotPath,
	}
	op, err := ac.tClient.CreateTableFromSnapshot(ctx, req)
	if err != nil {
		return err
	}
	// Wait for the operation to finish; the resulting Table is unmarshaled
	// into resp but intentionally not returned to the caller.
	resp := btapb.Table{}
	return longrunning.InternalNewOperation(ac.lroClient, op).Wait(ctx, &resp)
}
// DefaultSnapshotDuration requests the server-side default TTL when passed
// as the ttl argument of SnapshotTable.
const DefaultSnapshotDuration time.Duration = 0
// SnapshotTable creates a new snapshot in the specified cluster from the
// specified source table. Setting the ttl to `DefaultSnapshotDuration` will
// use the server side default for the duration.
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) SnapshotTable(ctx context.Context, table, cluster, snapshot string, ttl time.Duration) error {
	ctx = mergeOutgoingMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()

	// Only send an explicit TTL when one was requested; leaving Ttl nil
	// lets the server choose its default.
	var ttlProto *durpb.Duration
	if ttl > 0 {
		ttlProto = ptypes.DurationProto(ttl)
	}

	req := &btapb.SnapshotTableRequest{
		Name:       prefix + "/tables/" + table,
		Cluster:    prefix + "/clusters/" + cluster,
		SnapshotId: snapshot,
		Ttl:        ttlProto,
	}

	op, err := ac.tClient.SnapshotTable(ctx, req)
	if err != nil {
		return err
	}
	// Block until the long-running snapshot operation completes.
	resp := btapb.Snapshot{}
	return longrunning.InternalNewOperation(ac.lroClient, op).Wait(ctx, &resp)
}
// ListSnapshots returns a SnapshotIterator for iterating over the snapshots
// in a cluster. To list snapshots across all of the clusters in the instance
// specify "-" as the cluster.
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) ListSnapshots(ctx context.Context, cluster string) *SnapshotIterator {
	ctx = mergeOutgoingMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	clusterPath := prefix + "/clusters/" + cluster

	it := &SnapshotIterator{}
	req := &btapb.ListSnapshotsRequest{
		Parent: clusterPath,
	}
	// fetch retrieves one page of results into it.items and returns the
	// token for the next page. It is driven lazily by the iterator package
	// through it.pageInfo/it.nextFunc.
	fetch := func(pageSize int, pageToken string) (string, error) {
		req.PageToken = pageToken
		// The API field is int32; clamp rather than overflow.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}

		resp, err := ac.tClient.ListSnapshots(ctx, req)
		if err != nil {
			return "", err
		}
		for _, s := range resp.Snapshots {
			snapshotInfo, err := newSnapshotInfo(s)
			if err != nil {
				return "", fmt.Errorf("Failed to parse snapshot proto %v", err)
			}
			it.items = append(it.items, snapshotInfo)
		}
		return resp.NextPageToken, nil
	}
	bufLen := func() int { return len(it.items) }
	takeBuf := func() interface{} { b := it.items; it.items = nil; return b }

	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, bufLen, takeBuf)

	return it
}
// newSnapshotInfo converts a Snapshot proto into a SnapshotInfo, reducing
// the fully-qualified snapshot and source-table resource paths to their
// final path segments and converting the proto timestamps to time.Time.
func newSnapshotInfo(snapshot *btapb.Snapshot) (*SnapshotInfo, error) {
	nameParts := strings.Split(snapshot.Name, "/")
	name := nameParts[len(nameParts)-1]

	tablePathParts := strings.Split(snapshot.SourceTable.Name, "/")
	tableID := tablePathParts[len(tablePathParts)-1]

	// Error strings are lowercase per Go convention (staticcheck ST1005).
	createTime, err := ptypes.Timestamp(snapshot.CreateTime)
	if err != nil {
		return nil, fmt.Errorf("invalid createTime: %v", err)
	}

	deleteTime, err := ptypes.Timestamp(snapshot.DeleteTime)
	if err != nil {
		return nil, fmt.Errorf("invalid deleteTime: %v", err)
	}

	return &SnapshotInfo{
		Name:        name,
		SourceTable: tableID,
		DataSize:    snapshot.DataSizeBytes,
		CreateTime:  createTime,
		DeleteTime:  deleteTime,
	}, nil
}
// A SnapshotIterator iterates over the snapshots in a cluster, as returned
// by ListSnapshots. (The previous comment referred to an "EntryIterator
// over log entries", a copy-paste error.)
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
type SnapshotIterator struct {
	items    []*SnapshotInfo    // buffered results not yet returned by Next
	pageInfo *iterator.PageInfo // pagination state, exposed via PageInfo()
	nextFunc func() error       // fetches the next page into items when empty
}
// PageInfo supports pagination. See https://godoc.org/google.golang.org/api/iterator package for details.
func (it *SnapshotIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done
// (https://godoc.org/google.golang.org/api/iterator) if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *SnapshotIterator) Next() (*SnapshotInfo, error) {
	// nextFunc refills it.items from the API when the buffer is empty and
	// reports iterator.Done when no pages remain.
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}
// SnapshotInfo contains the metadata of a table snapshot as reported by the
// Bigtable admin API.
type SnapshotInfo struct {
	Name        string    // short snapshot name (final resource-path segment)
	SourceTable string    // short name of the table the snapshot was taken from
	DataSize    int64     // snapshot data size in bytes
	CreateTime  time.Time // creation time reported by the API
	DeleteTime  time.Time // deletion time reported by the API
}
// SnapshotInfo gets metadata for the named snapshot in the given cluster.
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) SnapshotInfo(ctx context.Context, cluster, snapshot string) (*SnapshotInfo, error) {
	ctx = mergeOutgoingMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	clusterPath := prefix + "/clusters/" + cluster
	snapshotPath := clusterPath + "/snapshots/" + snapshot

	req := &btapb.GetSnapshotRequest{
		Name: snapshotPath,
	}

	resp, err := ac.tClient.GetSnapshot(ctx, req)
	if err != nil {
		return nil, err
	}

	return newSnapshotInfo(resp)
}
// DeleteSnapshot deletes a snapshot in a cluster.
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) DeleteSnapshot(ctx context.Context, cluster, snapshot string) error {
	ctx = mergeOutgoingMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	clusterPath := prefix + "/clusters/" + cluster
	snapshotPath := clusterPath + "/snapshots/" + snapshot

	req := &btapb.DeleteSnapshotRequest{
		Name: snapshotPath,
	}
	_, err := ac.tClient.DeleteSnapshot(ctx, req)
	return err
}
// getConsistencyToken gets the consistency token for a table.
func (ac *AdminClient) getConsistencyToken(ctx context.Context, tableName string) (string, error) {
	resp, err := ac.tClient.GenerateConsistencyToken(ctx, &btapb.GenerateConsistencyTokenRequest{
		Name: tableName,
	})
	if err != nil {
		return "", err
	}
	return resp.GetConsistencyToken(), nil
}
// isConsistent reports whether the given consistency token has become
// consistent for the table, i.e. whether earlier writes have replicated.
func (ac *AdminClient) isConsistent(ctx context.Context, tableName, token string) (bool, error) {
	req := &btapb.CheckConsistencyRequest{
		Name:             tableName,
		ConsistencyToken: token,
	}
	var resp *btapb.CheckConsistencyResponse

	// Retry calls on retryable errors to avoid losing the token gathered before.
	err := gax.Invoke(ctx, func(ctx context.Context) error {
		var err error
		// Assign to the outer resp so the result survives the closure.
		resp, err = ac.tClient.CheckConsistency(ctx, req)
		return err
	}, retryOptions...)
	if err != nil {
		return false, err
	}
	return resp.GetConsistent(), nil
}
// WaitForReplication waits until all the writes committed before the call
// started have been propagated to all the clusters in the instance via
// replication. It polls roughly every ten seconds until the token becomes
// consistent, an RPC fails, or ctx is cancelled.
//
// This is a private alpha release of Cloud Bigtable replication. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) WaitForReplication(ctx context.Context, table string) error {
	// Get the token.
	prefix := ac.instancePrefix()
	tableName := prefix + "/tables/" + table
	token, err := ac.getConsistencyToken(ctx, tableName)
	if err != nil {
		return err
	}

	// Periodically check if the token is consistent.
	// The first probe happens immediately; later probes wait on the ticker.
	timer := time.NewTicker(time.Second * 10)
	defer timer.Stop()
	for {
		consistent, err := ac.isConsistent(ctx, tableName, token)
		if err != nil {
			return err
		}
		if consistent {
			return nil
		}
		// Sleep for a bit or until the ctx is cancelled.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-timer.C:
		}
	}
}
const instanceAdminAddr = "bigtableadmin.googleapis.com:443"
// InstanceAdminClient is a client type for performing admin operations on instances.
@ -336,24 +636,55 @@ type InstanceConf struct {
InstanceType InstanceType
}
// InstanceWithClustersConfig contains the information necessary to create an
// instance together with its initial set of clusters.
type InstanceWithClustersConfig struct {
	InstanceID, DisplayName string
	Clusters                []ClusterConfig
	InstanceType            InstanceType
}
var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`)
// CreateInstance creates a new instance in the project.
// This method will return when the instance has been created or when an error occurs.
func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error {
ctx = mergeOutgoingMetadata(ctx, iac.md)
req := &btapb.CreateInstanceRequest{
Parent: "projects/" + iac.project,
InstanceId: conf.InstanceId,
Instance: &btapb.Instance{DisplayName: conf.DisplayName, Type: btapb.Instance_Type(conf.InstanceType)},
Clusters: map[string]*btapb.Cluster{
conf.ClusterId: {
ServeNodes: conf.NumNodes,
DefaultStorageType: conf.StorageType.proto(),
Location: "projects/" + iac.project + "/locations/" + conf.Zone,
newConfig := InstanceWithClustersConfig{
InstanceID: conf.InstanceId,
DisplayName: conf.DisplayName,
InstanceType: conf.InstanceType,
Clusters: []ClusterConfig{
{
InstanceID: conf.InstanceId,
ClusterID: conf.ClusterId,
Zone: conf.Zone,
NumNodes: conf.NumNodes,
StorageType: conf.StorageType,
},
},
}
return iac.CreateInstanceWithClusters(ctx, &newConfig)
}
// CreateInstance creates a new instance with configured clusters in the project.
// This method will return when the instance has been created or when an error occurs.
//
// Instances with multiple clusters are part of a private alpha release of Cloud Bigtable replication.
// This feature is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (iac *InstanceAdminClient) CreateInstanceWithClusters(ctx context.Context, conf *InstanceWithClustersConfig) error {
ctx = mergeOutgoingMetadata(ctx, iac.md)
clusters := make(map[string]*btapb.Cluster)
for _, cluster := range conf.Clusters {
clusters[cluster.ClusterID] = cluster.proto(iac.project)
}
req := &btapb.CreateInstanceRequest{
Parent: "projects/" + iac.project,
InstanceId: conf.InstanceID,
Instance: &btapb.Instance{DisplayName: conf.DisplayName, Type: btapb.Instance_Type(conf.InstanceType)},
Clusters: clusters,
}
lro, err := iac.iClient.CreateInstance(ctx, req)
if err != nil {
@ -421,3 +752,99 @@ func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceId str
DisplayName: res.DisplayName,
}, nil
}
// ClusterConfig contains the information necessary to create a cluster.
type ClusterConfig struct {
	InstanceID, ClusterID, Zone string
	NumNodes                    int32 // number of serve nodes to allocate
	StorageType                 StorageType
}
// proto builds the protobuf representation of the cluster configuration,
// expanding cc.Zone into a fully-qualified location path under project.
func (cc *ClusterConfig) proto(project string) *btapb.Cluster {
	return &btapb.Cluster{
		ServeNodes:         cc.NumNodes,
		DefaultStorageType: cc.StorageType.proto(),
		Location:           "projects/" + project + "/locations/" + cc.Zone,
	}
}
// ClusterInfo represents information about a cluster.
type ClusterInfo struct {
Name string // name of the cluster
Zone string // GCP zone of the cluster (e.g. "us-central1-a")
ServeNodes int // number of allocated serve nodes
State string // state of the cluster
}
// CreateCluster creates a new cluster in an instance.
// This method will return when the cluster has been created or when an error occurs.
//
// This is a private alpha release of Cloud Bigtable replication. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (iac *InstanceAdminClient) CreateCluster(ctx context.Context, conf *ClusterConfig) error {
	ctx = mergeOutgoingMetadata(ctx, iac.md)

	req := &btapb.CreateClusterRequest{
		Parent:    "projects/" + iac.project + "/instances/" + conf.InstanceID,
		ClusterId: conf.ClusterID,
		Cluster:   conf.proto(iac.project),
	}

	lro, err := iac.iClient.CreateCluster(ctx, req)
	if err != nil {
		return err
	}
	// Block until the long-running create operation completes.
	resp := btapb.Cluster{}
	return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, &resp)
}
// DeleteCluster deletes a cluster from an instance.
//
// This is a private alpha release of Cloud Bigtable replication. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (iac *InstanceAdminClient) DeleteCluster(ctx context.Context, instanceId, clusterId string) error {
	ctx = mergeOutgoingMetadata(ctx, iac.md)
	// Keyed literal: the unkeyed form is flagged by `go vet` (composites)
	// and breaks silently if fields are added to DeleteClusterRequest.
	req := &btapb.DeleteClusterRequest{
		Name: "projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId,
	}
	_, err := iac.iClient.DeleteCluster(ctx, req)
	return err
}
// UpdateCluster updates attributes of a cluster, currently the number of
// serve nodes, and waits for the update to complete.
func (iac *InstanceAdminClient) UpdateCluster(ctx context.Context, instanceId, clusterId string, serveNodes int32) error {
	ctx = mergeOutgoingMetadata(ctx, iac.md)
	name := "projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId
	lro, err := iac.iClient.UpdateCluster(ctx, &btapb.Cluster{
		Name:       name,
		ServeNodes: serveNodes,
	})
	if err != nil {
		return err
	}
	// Wait for the long-running operation; no result payload is needed.
	return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, nil)
}
// Clusters lists the clusters in an instance.
func (iac *InstanceAdminClient) Clusters(ctx context.Context, instanceId string) ([]*ClusterInfo, error) {
	ctx = mergeOutgoingMetadata(ctx, iac.md)
	req := &btapb.ListClustersRequest{Parent: "projects/" + iac.project + "/instances/" + instanceId}
	res, err := iac.iClient.ListClusters(ctx, req)
	if err != nil {
		return nil, err
	}
	// TODO(garyelliott): Deal with failed_locations.
	var infos []*ClusterInfo
	for _, c := range res.Clusters {
		// Name and Location are fully qualified resource paths; keep only
		// the final path segment of each.
		name := c.Name[strings.LastIndex(c.Name, "/")+1:]
		zone := c.Location[strings.LastIndex(c.Location, "/")+1:]
		infos = append(infos, &ClusterInfo{
			Name:       name,
			Zone:       zone,
			ServeNodes: int(c.ServeNodes),
			State:      c.State.String(),
		})
	}
	return infos, nil
}

View File

@ -15,13 +15,16 @@
package bigtable
import (
"math"
"sort"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"fmt"
"golang.org/x/net/context"
"reflect"
"google.golang.org/api/iterator"
"strings"
)
@ -97,6 +100,9 @@ func TestAdminIntegration(t *testing.T) {
if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) {
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
}
adminClient.WaitForReplication(ctx, "mytable")
if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
t.Fatalf("Deleting table: %v", err)
}
@ -126,7 +132,7 @@ func TestAdminIntegration(t *testing.T) {
}
sort.Strings(tblInfo.Families)
wantFams := []string{"fam1", "fam2"}
if !reflect.DeepEqual(tblInfo.Families, wantFams) {
if !testutil.Equal(tblInfo.Families, wantFams) {
t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams)
}
@ -176,3 +182,120 @@ func TestAdminIntegration(t *testing.T) {
t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5)
}
}
// TestAdminSnapshotIntegration exercises the snapshot admin API end to end
// against a production instance: create, list, get, restore from, and delete
// a snapshot of a table. The emulator is skipped (no snapshot support).
func TestAdminSnapshotIntegration(t *testing.T) {
	testEnv, err := NewIntegrationEnv()
	if err != nil {
		t.Fatalf("IntegrationEnv: %v", err)
	}
	defer testEnv.Close()

	if !testEnv.Config().UseProd {
		t.Skip("emulator doesn't support snapshots")
	}

	timeout := 2 * time.Second
	if testEnv.Config().UseProd {
		timeout = 5 * time.Minute
	}
	// Keep the CancelFunc and release the context's resources when the test
	// ends; discarding it leaks the timer (go vet's lostcancel check).
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	adminClient, err := testEnv.NewAdminClient()
	if err != nil {
		t.Fatalf("NewAdminClient: %v", err)
	}
	defer adminClient.Close()

	table := testEnv.Config().Table
	cluster := testEnv.Config().Cluster

	// list drains the snapshot iterator for the given cluster into a slice.
	list := func(cluster string) ([]*SnapshotInfo, error) {
		infos := []*SnapshotInfo(nil)
		it := adminClient.ListSnapshots(ctx, cluster)
		for {
			s, err := it.Next()
			if err == iterator.Done {
				break
			}
			if err != nil {
				return nil, err
			}
			infos = append(infos, s)
		}
		// The loop exits only via iterator.Done, which means success;
		// returning the enclosing function's err here would be stale.
		return infos, nil
	}

	// Delete the table at the end of the test. Schedule ahead of time
	// in case the client fails
	defer adminClient.DeleteTable(ctx, table)

	if err := adminClient.CreateTable(ctx, table); err != nil {
		t.Fatalf("Creating table: %v", err)
	}

	// Precondition: no snapshots
	snapshots, err := list(cluster)
	if err != nil {
		t.Fatalf("Initial snapshot list: %v", err)
	}
	if got, want := len(snapshots), 0; got != want {
		t.Fatalf("Initial snapshot list len: %d, want: %d", got, want)
	}

	// Create snapshot
	defer adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot")

	if err = adminClient.SnapshotTable(ctx, table, cluster, "mysnapshot", 5*time.Hour); err != nil {
		t.Fatalf("Creating snaphot: %v", err)
	}

	// List snapshot
	snapshots, err = list(cluster)
	if err != nil {
		t.Fatalf("Listing snapshots: %v", err)
	}
	if got, want := len(snapshots), 1; got != want {
		t.Fatalf("Listing snapshot count: %d, want: %d", got, want)
	}
	if got, want := snapshots[0].Name, "mysnapshot"; got != want {
		t.Fatalf("Snapshot name: %s, want: %s", got, want)
	}
	if got, want := snapshots[0].SourceTable, table; got != want {
		t.Fatalf("Snapshot SourceTable: %s, want: %s", got, want)
	}
	if got, want := snapshots[0].DeleteTime, snapshots[0].CreateTime.Add(5*time.Hour); math.Abs(got.Sub(want).Minutes()) > 1 {
		t.Fatalf("Snapshot DeleteTime: %s, want: %s", got, want)
	}

	// Get snapshot
	snapshot, err := adminClient.SnapshotInfo(ctx, cluster, "mysnapshot")
	if err != nil {
		// Report the error, not the (nil-on-failure) snapshot value.
		t.Fatalf("SnapshotInfo: %v", err)
	}
	if got, want := *snapshot, *snapshots[0]; got != want {
		t.Fatalf("SnapshotInfo: %v, want: %v", got, want)
	}

	// Restore
	restoredTable := table + "-restored"
	defer adminClient.DeleteTable(ctx, restoredTable)
	if err = adminClient.CreateTableFromSnapshot(ctx, restoredTable, cluster, "mysnapshot"); err != nil {
		t.Fatalf("CreateTableFromSnapshot: %v", err)
	}
	if _, err := adminClient.TableInfo(ctx, restoredTable); err != nil {
		t.Fatalf("Restored TableInfo: %v", err)
	}

	// Delete snapshot
	if err = adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot"); err != nil {
		t.Fatalf("DeleteSnapshot: %v", err)
	}
	snapshots, err = list(cluster)
	if err != nil {
		t.Fatalf("List after Delete: %v", err)
	}
	if got, want := len(snapshots), 0; got != want {
		t.Fatalf("List after delete len: %d, want: %d", got, want)
	}
}

View File

@ -44,10 +44,28 @@ type Client struct {
conn *grpc.ClientConn
client btpb.BigtableClient
project, instance string
// App Profiles are part of the private alpha release of Cloud Bigtable replication.
// This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
appProfile string
}
// ClientConfig has configurations for the client.
// It is passed to NewClientWithConfig.
type ClientConfig struct {
	// The id of the app profile to associate with all data operations sent from this client.
	// If unspecified, the default app profile for the instance will be used.
	AppProfile string
}
// NewClient creates a new Client for a given project and instance.
// The default ClientConfig will be used.
func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
	// Delegate to NewClientWithConfig with a zero-value config.
	return NewClientWithConfig(ctx, project, instance, ClientConfig{}, opts...)
}
func NewClientWithConfig(ctx context.Context, project, instance string, config ClientConfig, opts ...option.ClientOption) (*Client, error) {
o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
if err != nil {
return nil, err
@ -130,9 +148,16 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts
var prevRowKey string
err := gax.Invoke(ctx, func(ctx context.Context) error {
if !arg.valid() {
// Empty row set, no need to make an API call.
// NOTE: we must return early if arg == RowList{} because reading
// an empty RowList from bigtable returns all rows from that table.
return nil
}
req := &btpb.ReadRowsRequest{
TableName: t.c.fullTableName(t.table),
Rows: arg.proto(),
TableName: t.c.fullTableName(t.table),
AppProfileId: t.c.appProfile,
Rows: arg.proto(),
}
for _, opt := range opts {
opt.set(req)
@ -313,7 +338,7 @@ func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
}
func (r RowRange) valid() bool {
return r.start < r.limit
return r.Unbounded() || r.start < r.limit
}
// RowRangeList is a sequence of RowRanges representing the union of the ranges.
@ -440,9 +465,10 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl
var callOptions []gax.CallOption
if m.cond == nil {
req := &btpb.MutateRowRequest{
TableName: t.c.fullTableName(t.table),
RowKey: []byte(row),
Mutations: m.ops,
TableName: t.c.fullTableName(t.table),
AppProfileId: t.c.appProfile,
RowKey: []byte(row),
Mutations: m.ops,
}
if mutationsAreRetryable(m.ops) {
callOptions = retryOptions
@ -461,13 +487,20 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl
req := &btpb.CheckAndMutateRowRequest{
TableName: t.c.fullTableName(t.table),
AppProfileId: t.c.appProfile,
RowKey: []byte(row),
PredicateFilter: m.cond.proto(),
}
if m.mtrue != nil {
if m.mtrue.cond != nil {
return errors.New("bigtable: conditional mutations cannot be nested")
}
req.TrueMutations = m.mtrue.ops
}
if m.mfalse != nil {
if m.mfalse.cond != nil {
return errors.New("bigtable: conditional mutations cannot be nested")
}
req.FalseMutations = m.mfalse.ops
}
if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
@ -670,8 +703,9 @@ func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...
entries[i] = entryErr.Entry
}
req := &btpb.MutateRowsRequest{
TableName: t.c.fullTableName(t.table),
Entries: entries,
TableName: t.c.fullTableName(t.table),
AppProfileId: t.c.appProfile,
Entries: entries,
}
stream, err := t.c.client.MutateRows(ctx, req)
if err != nil {
@ -729,9 +763,10 @@ func (ts Timestamp) TruncateToMilliseconds() Timestamp {
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
ctx = mergeOutgoingMetadata(ctx, t.md)
req := &btpb.ReadModifyWriteRowRequest{
TableName: t.c.fullTableName(t.table),
RowKey: []byte(row),
Rules: m.ops,
TableName: t.c.fullTableName(t.table),
AppProfileId: t.c.appProfile,
RowKey: []byte(row),
Rules: m.ops,
}
res, err := t.c.client.ReadModifyWriteRow(ctx, req)
if err != nil {

View File

@ -19,13 +19,16 @@ package bigtable
import (
"fmt"
"math/rand"
"reflect"
"strings"
"sync"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
func TestPrefix(t *testing.T) {
@ -53,6 +56,29 @@ func TestPrefix(t *testing.T) {
}
}
// TestApplyErrors verifies that Apply rejects nested conditional mutations
// on both the true branch and the false branch.
func TestApplyErrors(t *testing.T) {
	ctx := context.Background()
	table := &Table{
		c: &Client{
			project:  "P",
			instance: "I",
		},
		table: "t",
	}
	filter := ColumnFilter("C")
	del := NewMutation()
	del.DeleteRow()
	inner := NewCondMutation(filter, del, nil)
	// A conditional mutation nested inside either branch must be an error.
	outerTrue := NewCondMutation(filter, inner, nil)
	if err := table.Apply(ctx, "x", outerTrue); err == nil {
		t.Error("got nil, want error")
	}
	outerFalse := NewCondMutation(filter, nil, inner)
	if err := table.Apply(ctx, "x", outerFalse); err == nil {
		t.Error("got nil, want error")
	}
}
func TestClientIntegration(t *testing.T) {
start := time.Now()
lastCheckpoint := start
@ -67,14 +93,16 @@ func TestClientIntegration(t *testing.T) {
t.Fatalf("IntegrationEnv: %v", err)
}
timeout := 30 * time.Second
var timeout time.Duration
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
timeout = 10 * time.Minute
t.Logf("Running test against production")
} else {
timeout = 1 * time.Minute
t.Logf("bttest.Server running on %s", testEnv.Config().AdminEndpoint)
}
ctx, _ := context.WithTimeout(context.Background(), timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
client, err := testEnv.NewClient()
if err != nil {
@ -126,6 +154,11 @@ func TestClientIntegration(t *testing.T) {
}
checkpoint("inserted initial data")
if err := adminClient.WaitForReplication(ctx, table); err != nil {
t.Errorf("Waiting for replication for table %q: %v", table, err)
}
checkpoint("waited for replication")
// Do a conditional mutation with a complex filter.
mutTrue := NewMutation()
mutTrue.Set("follows", "wmckinley", 0, []byte("1"))
@ -156,7 +189,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")},
},
}
if !reflect.DeepEqual(row, wantRow) {
if !testutil.Equal(row, wantRow) {
t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
}
checkpoint("tested ReadRow")
@ -319,6 +352,12 @@ func TestClientIntegration(t *testing.T) {
filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), ColumnFilter(".*mckinley.*")), StripValueFilter(), nil),
want: "",
},
{
desc: "chain that ends with an interleave that has no match. covers #804",
rr: RowRange{},
filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*"))), StripValueFilter(), nil),
want: "",
},
}
for _, tc := range readTests {
var opts []ReadOption
@ -442,9 +481,13 @@ func TestClientIntegration(t *testing.T) {
if err != nil {
t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err)
}
// Make sure the modified cell returned by the RMW operation has a timestamp.
if row["counter"][0].Timestamp == 0 {
t.Errorf("RMW returned cell timestamp: got %v, want > 0", row["counter"][0].Timestamp)
}
clearTimestamps(row)
wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}}
if !reflect.DeepEqual(row, wantRow) {
if !testutil.Equal(row, wantRow) {
t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow)
}
}
@ -498,7 +541,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow)
}
// Do the same read, but filter to the latest two versions.
@ -512,7 +555,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow)
}
// Check cell offset / limit
@ -525,7 +568,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and CellsPerRowLimitFilter(3),\n got %v\nwant %v", r, wantRow)
}
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowOffsetFilter(3)))
@ -539,7 +582,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and CellsPerRowOffsetFilter(3),\n got %v\nwant %v", r, wantRow)
}
// Check timestamp range filtering (with truncation)
@ -553,7 +596,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow)
}
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1000, 0)))
@ -568,7 +611,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow)
}
// Delete non-existing cells, no such column family in this row
@ -585,7 +628,7 @@ func TestClientIntegration(t *testing.T) {
if err != nil {
t.Fatalf("Reading row: %v", err)
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow)
}
// Delete non-existing cells, no such column in this column family
@ -599,7 +642,7 @@ func TestClientIntegration(t *testing.T) {
if err != nil {
t.Fatalf("Reading row: %v", err)
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow)
}
// Delete the cell with timestamp 2000 and repeat the last read,
@ -619,7 +662,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
}
checkpoint("tested multiple versions in a cell")
@ -654,7 +697,7 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{
{Row: "row1", Column: "ts:col", Timestamp: 0, Value: []byte("3")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("column family was not deleted.\n got %v\n want %v", r, wantRow)
}
@ -672,7 +715,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "row2", Column: "status:start", Timestamp: 0, Value: []byte("1")},
},
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Column family was deleted unexpectly.\n got %v\n want %v", r, wantRow)
}
checkpoint("tested family delete")
@ -700,7 +743,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "row3", Column: "status:start", Timestamp: 0, Value: []byte("1")},
},
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow)
}
mut = NewMutation()
@ -717,7 +760,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")},
},
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow)
}
mut = NewMutation()
@ -742,7 +785,7 @@ func TestClientIntegration(t *testing.T) {
if err != nil {
t.Fatalf("Reading row: %v", err)
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Column was not deleted correctly.\n got %v\n want %v", r, wantRow)
}
checkpoint("tested column delete")
@ -791,7 +834,7 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{
{Row: "bigrow", Column: "ts:col", Value: bigBytes},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Big read returned incorrect bytes: %v", r)
}
// Now write 1000 rows, each with 82 KB values, then scan them all.
@ -879,7 +922,7 @@ func TestClientIntegration(t *testing.T) {
wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")})
}
wantRow := Row{"bulk": wantItems}
if !reflect.DeepEqual(row, wantRow) {
if !testutil.Equal(row, wantRow) {
t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
}
}
@ -902,6 +945,103 @@ func TestClientIntegration(t *testing.T) {
}
}
// requestCountingInterceptor wraps a grpc.ClientStream and invokes
// requestCallback every time a message is sent on the stream, letting tests
// count outgoing requests.
type requestCountingInterceptor struct {
	grpc.ClientStream
	requestCallback func()
}
// SendMsg fires the counting callback, then forwards the message to the
// wrapped stream.
func (i *requestCountingInterceptor) SendMsg(m interface{}) error {
	i.requestCallback()
	return i.ClientStream.SendMsg(m)
}
// RecvMsg delegates directly to the wrapped stream; only sends are counted.
func (i *requestCountingInterceptor) RecvMsg(m interface{}) error {
	return i.ClientStream.RecvMsg(m)
}
// requestCallback builds a gRPC stream interceptor that wraps every new
// client stream in a requestCountingInterceptor, so callback runs once per
// sent message.
func requestCallback(callback func()) func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		stream, err := streamer(ctx, desc, cc, method, opts...)
		wrapped := &requestCountingInterceptor{
			ClientStream:    stream,
			requestCallback: callback,
		}
		return wrapped, err
	}
}
// TestReadRowsInvalidRowSet verifies that the client doesn't send ReadRows() requests with invalid RowSets.
func TestReadRowsInvalidRowSet(t *testing.T) {
	testEnv, err := NewEmulatedEnv(IntegrationTestConfig{})
	if err != nil {
		t.Fatalf("NewEmulatedEnv failed: %v", err)
	}
	// requestCount is bumped by the stream interceptor on every sent
	// message, so it tells us whether an RPC actually went out on the wire.
	var requestCount int
	incrementRequestCount := func() { requestCount++ }
	conn, err := grpc.Dial(testEnv.server.Addr, grpc.WithInsecure(), grpc.WithBlock(),
		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)),
		grpc.WithStreamInterceptor(requestCallback(incrementRequestCount)),
	)
	if err != nil {
		t.Fatalf("grpc.Dial failed: %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	adminClient, err := NewAdminClient(ctx, testEnv.config.Project, testEnv.config.Instance, option.WithGRPCConn(conn))
	if err != nil {
		t.Fatalf("NewClient failed: %v", err)
	}
	defer adminClient.Close()
	if err := adminClient.CreateTable(ctx, testEnv.config.Table); err != nil {
		t.Fatalf("CreateTable(%v) failed: %v", testEnv.config.Table, err)
	}
	client, err := NewClient(ctx, testEnv.config.Project, testEnv.config.Instance, option.WithGRPCConn(conn))
	if err != nil {
		t.Fatalf("NewClient failed: %v", err)
	}
	defer client.Close()
	table := client.Open(testEnv.config.Table)
	// Each case pairs a RowSet with whether the client should treat it as
	// valid and therefore issue a ReadRows RPC for it.
	tests := []struct {
		rr    RowSet
		valid bool
	}{
		{
			rr:    RowRange{},
			valid: true,
		},
		{
			rr:    RowRange{start: "b"},
			valid: true,
		},
		{
			rr:    RowRange{start: "b", limit: "c"},
			valid: true,
		},
		{
			rr:    RowRange{start: "b", limit: "a"},
			valid: false,
		},
		{
			rr:    RowList{"a"},
			valid: true,
		},
		{
			rr:    RowList{},
			valid: false,
		},
	}
	for _, test := range tests {
		requestCount = 0
		err = table.ReadRows(ctx, test.rr, func(r Row) bool { return true })
		if err != nil {
			t.Fatalf("ReadRows(%v) failed: %v", test.rr, err)
		}
		// A request went out iff at least one message was sent.
		requestValid := requestCount != 0
		if requestValid != test.valid {
			t.Errorf("%s: got %v, want %v", test.rr, requestValid, test.valid)
		}
	}
}
func formatReadItem(ri ReadItem) string {
// Use the column qualifier only to make the test data briefer.
col := ri.Column[strings.Index(ri.Column, ":")+1:]

View File

@ -28,7 +28,7 @@ import (
func ExampleNewServer() {
srv, err := bttest.NewServer("127.0.0.1:0")
srv, err := bttest.NewServer("localhost:0")
if err != nil {
log.Fatalln(err)

View File

@ -19,7 +19,7 @@ Package bttest contains test helpers for working with the bigtable package.
To use a Server, create it, and then connect to it with no security:
(The project/instance values are ignored.)
srv, err := bttest.NewServer("127.0.0.1:0")
srv, err := bttest.NewServer("localhost:0")
...
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
...
@ -45,12 +45,14 @@ import (
emptypb "github.com/golang/protobuf/ptypes/empty"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/google/btree"
"golang.org/x/net/context"
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
statpb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Server is an in-memory Cloud Bigtable fake.
@ -162,7 +164,7 @@ func (s *server) DeleteTable(ctx context.Context, req *btapb.DeleteTableRequest)
s.mu.Lock()
defer s.mu.Unlock()
if _, ok := s.tables[req.Name]; !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
}
delete(s.tables, req.Name)
return &emptypb.Empty{}, nil
@ -224,65 +226,103 @@ func (s *server) DropRowRange(ctx context.Context, req *btapb.DropRowRangeReques
defer s.mu.Unlock()
tbl, ok := s.tables[req.Name]
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
}
if req.GetDeleteAllDataFromTable() {
tbl.rows = nil
tbl.rowIndex = make(map[string]*row)
tbl.rows = btree.New(btreeDegree)
} else {
// Delete rows by prefix
// Delete rows by prefix.
prefixBytes := req.GetRowKeyPrefix()
if prefixBytes == nil {
return nil, fmt.Errorf("missing row key prefix")
}
prefix := string(prefixBytes)
start := -1
end := 0
for i, row := range tbl.rows {
match := strings.HasPrefix(row.key, prefix)
if match {
// Delete the mapping. Row will be deleted from sorted range below.
delete(tbl.rowIndex, row.key)
// The BTree does not specify what happens if rows are deleted during
// iteration, and it provides no "delete range" method.
// So we collect the rows first, then delete them one by one.
var rowsToDelete []*row
tbl.rows.AscendGreaterOrEqual(btreeKey(prefix), func(i btree.Item) bool {
r := i.(*row)
if strings.HasPrefix(r.key, prefix) {
rowsToDelete = append(rowsToDelete, r)
return true
} else {
return false // stop iteration
}
if match && start == -1 {
start = i
} else if !match && start != -1 {
break
}
end++
}
if start != -1 {
// Delete the range, using method from https://github.com/golang/go/wiki/SliceTricks
copy(tbl.rows[start:], tbl.rows[end:])
for k, n := len(tbl.rows)-end+start, len(tbl.rows); k < n; k++ {
tbl.rows[k] = nil
}
tbl.rows = tbl.rows[:len(tbl.rows)-end+start]
})
for _, r := range rowsToDelete {
tbl.rows.Delete(r)
}
}
return &emptypb.Empty{}, nil
}
// This is a private alpha release of Cloud Bigtable replication. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (s *server) GenerateConsistencyToken(ctx context.Context, req *btapb.GenerateConsistencyTokenRequest) (*btapb.GenerateConsistencyTokenResponse, error) {
	// Check that the table exists. s.tables is shared mutable state, so
	// guard the read with s.mu as the other RPC handlers do.
	s.mu.Lock()
	_, ok := s.tables[req.Name]
	s.mu.Unlock()
	if !ok {
		return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
	}
	return &btapb.GenerateConsistencyTokenResponse{
		ConsistencyToken: "TokenFor-" + req.Name,
	}, nil
}
// This is a private alpha release of Cloud Bigtable replication. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (s *server) CheckConsistency(ctx context.Context, req *btapb.CheckConsistencyRequest) (*btapb.CheckConsistencyResponse, error) {
	// Check that the table exists. Guard the shared tables map with s.mu,
	// matching the locking discipline of the other RPC handlers.
	s.mu.Lock()
	_, ok := s.tables[req.Name]
	s.mu.Unlock()
	if !ok {
		return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
	}
	// Check this is the right token.
	if req.ConsistencyToken != "TokenFor-"+req.Name {
		return nil, status.Errorf(codes.InvalidArgument, "token %q not valid", req.ConsistencyToken)
	}
	// Single cluster instances are always consistent.
	return &btapb.CheckConsistencyResponse{
		Consistent: true,
	}, nil
}
func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error {
s.mu.Lock()
tbl, ok := s.tables[req.TableName]
s.mu.Unlock()
if !ok {
return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
return status.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
// Rows to read can be specified by a set of row keys and/or a set of row ranges.
// Output is a stream of sorted, de-duped rows.
tbl.mu.RLock()
rowSet := make(map[string]*row)
addRow := func(i btree.Item) bool {
r := i.(*row)
rowSet[r.key] = r
return true
}
if req.Rows != nil {
// Add the explicitly given keys
for _, key := range req.Rows.RowKeys {
start := string(key)
addRows(start, start+"\x00", tbl, rowSet)
k := string(key)
if i := tbl.rows.Get(btreeKey(k)); i != nil {
addRow(i)
}
}
// Add keys from row ranges
@ -300,12 +340,20 @@ func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRo
case *btpb.RowRange_EndKeyOpen:
end = string(ek.EndKeyOpen)
}
addRows(start, end, tbl, rowSet)
switch {
case start == "" && end == "":
tbl.rows.Ascend(addRow) // all rows
case start == "":
tbl.rows.AscendLessThan(btreeKey(end), addRow)
case end == "":
tbl.rows.AscendGreaterOrEqual(btreeKey(start), addRow)
default:
tbl.rows.AscendRange(btreeKey(start), btreeKey(end), addRow)
}
}
} else {
// Read all rows
addRows("", "", tbl, rowSet)
tbl.rows.Ascend(addRow)
}
tbl.mu.RUnlock()
@ -332,21 +380,6 @@ func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRo
return nil
}
// addRows copies every row of tbl whose key lies in the half-open interval
// [start, end) into rowSet, keyed by row key. An empty start or end leaves
// that side of the interval unbounded. tbl.rows is kept sorted by key, so
// binary search locates the interval's bounds.
func addRows(start, end string, tbl *table, rowSet map[string]*row) {
	lo, hi := 0, len(tbl.rows) // half-open index interval
	if start != "" {
		lo = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start })
	}
	if end != "" {
		hi = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end })
	}
	for i := lo; i < hi; i++ {
		r := tbl.rows[i]
		rowSet[r.key] = r
	}
}
// streamRow filters the given row and sends it via the given stream.
// Returns true if at least one cell matched the filter and was streamed, false otherwise.
func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (bool, error) {
@ -421,12 +454,14 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
}
}
}
var count int
for _, fam := range r.families {
for _, cs := range fam.cells {
sort.Sort(byDescTS(cs))
count += len(cs)
}
}
return true
return count > 0
case *btpb.RowFilter_CellsPerColumnLimitFilter:
lim := int(f.CellsPerColumnLimitFilter)
for _, fam := range r.families {
@ -621,12 +656,11 @@ func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*bt
tbl, ok := s.tables[req.TableName]
s.mu.Unlock()
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
return nil, status.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
fs := tbl.columnFamilies()
r, _ := tbl.mutableRow(string(req.RowKey))
r := tbl.mutableRow(string(req.RowKey))
r.mu.Lock()
defer tbl.resortRowIndex() // Make sure the row lock is released before this grabs the table lock
defer r.mu.Unlock()
if err := applyMutations(tbl, r, req.Mutations, fs); err != nil {
return nil, err
@ -639,15 +673,14 @@ func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_Mu
tbl, ok := s.tables[req.TableName]
s.mu.Unlock()
if !ok {
return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
return status.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))}
fs := tbl.columnFamilies()
defer tbl.resortRowIndex()
for i, entry := range req.Entries {
r, _ := tbl.mutableRow(string(entry.RowKey))
r := tbl.mutableRow(string(entry.RowKey))
r.mu.Lock()
code, msg := int32(codes.OK), ""
if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil {
@ -669,13 +702,13 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutate
tbl, ok := s.tables[req.TableName]
s.mu.Unlock()
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
return nil, status.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
res := &btpb.CheckAndMutateRowResponse{}
fs := tbl.columnFamilies()
r, _ := tbl.mutableRow(string(req.RowKey))
r := tbl.mutableRow(string(req.RowKey))
r.mu.Lock()
defer r.mu.Unlock()
@ -690,16 +723,13 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutate
nr := r.copy()
filterRow(req.PredicateFilter, nr)
whichMut = !nr.isEmpty()
// TODO(dsymonds): Figure out if this is supposed to be set
// even when there's no predicate filter.
res.PredicateMatched = whichMut
}
res.PredicateMatched = whichMut
muts := req.FalseMutations
if whichMut {
muts = req.TrueMutations
}
defer tbl.resortRowIndex()
if err := applyMutations(tbl, r, muts, fs); err != nil {
return nil, err
}
@ -824,18 +854,15 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri
tbl, ok := s.tables[req.TableName]
s.mu.Unlock()
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
return nil, status.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
updates := make(map[string]cell) // copy of updated cells; keyed by full column name
fs := tbl.columnFamilies()
rowKey := string(req.RowKey)
r, isNewRow := tbl.mutableRow(rowKey)
r := tbl.mutableRow(rowKey)
// This must be done before the row lock, acquired below, is released.
if isNewRow {
defer tbl.resortRowIndex()
}
r.mu.Lock()
defer r.mu.Unlock()
// Assume all mutations apply to the most recent version of the cell.
@ -907,7 +934,8 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri
f.Columns = append(f.Columns, &btpb.Column{
Qualifier: []byte(qual),
Cells: []*btpb.Cell{{
Value: cell.value,
TimestampMicros: cell.ts,
Value: cell.value,
}},
})
}
@ -919,7 +947,7 @@ func (s *server) SampleRowKeys(req *btpb.SampleRowKeysRequest, stream btpb.Bigta
tbl, ok := s.tables[req.TableName]
s.mu.Unlock()
if !ok {
return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
return status.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
tbl.mu.RLock()
@ -928,20 +956,25 @@ func (s *server) SampleRowKeys(req *btpb.SampleRowKeysRequest, stream btpb.Bigta
// The return value of SampleRowKeys is very loosely defined. Return at least the
// final row key in the table and choose other row keys randomly.
var offset int64
for i, row := range tbl.rows {
if i == len(tbl.rows)-1 || rand.Int31n(100) == 0 {
var err error
i := 0
tbl.rows.Ascend(func(it btree.Item) bool {
row := it.(*row)
if i == tbl.rows.Len()-1 || rand.Int31n(100) == 0 {
resp := &btpb.SampleRowKeysResponse{
RowKey: []byte(row.key),
OffsetBytes: offset,
}
err := stream.Send(resp)
err = stream.Send(resp)
if err != nil {
return err
return false
}
}
offset += int64(row.size())
}
return nil
i++
return true
})
return err
}
// needGC is invoked whenever the server needs gcloop running.
@ -986,10 +1019,11 @@ type table struct {
mu sync.RWMutex
counter uint64 // increment by 1 when a new family is created
families map[string]*columnFamily // keyed by plain family name
rows []*row // sorted by row key
rowIndex map[string]*row // indexed by row key
rows *btree.BTree // indexed by row key
}
// btreeDegree is the branching factor used for each table's row BTree.
const btreeDegree = 16
func newTable(ctr *btapb.CreateTableRequest) *table {
fams := make(map[string]*columnFamily)
c := uint64(0)
@ -1006,7 +1040,7 @@ func newTable(ctr *btapb.CreateTableRequest) *table {
return &table{
families: fams,
counter: c,
rowIndex: make(map[string]*row),
rows: btree.New(btreeDegree),
}
}
@ -1025,31 +1059,26 @@ func (t *table) columnFamilies() map[string]*columnFamily {
return cp
}
func (t *table) mutableRow(row string) (mutRow *row, isNewRow bool) {
func (t *table) mutableRow(key string) *row {
bkey := btreeKey(key)
// Try fast path first.
t.mu.RLock()
r := t.rowIndex[row]
i := t.rows.Get(bkey)
t.mu.RUnlock()
if r != nil {
return r, false
if i != nil {
return i.(*row)
}
// We probably need to create the row.
t.mu.Lock()
r = t.rowIndex[row]
if r == nil {
r = newRow(row)
t.rowIndex[row] = r
t.rows = append(t.rows, r)
defer t.mu.Unlock()
i = t.rows.Get(bkey)
if i != nil {
return i.(*row)
}
t.mu.Unlock()
return r, true
}
func (t *table) resortRowIndex() {
t.mu.Lock()
sort.Sort(byRowKey(t.rows))
t.mu.Unlock()
r := newRow(key)
t.rows.ReplaceOrInsert(r)
return r
}
func (t *table) gc() {
@ -1068,11 +1097,13 @@ func (t *table) gc() {
return
}
for _, r := range t.rows {
t.rows.Ascend(func(i btree.Item) bool {
r := i.(*row)
r.mu.Lock()
r.gc(rules)
r.mu.Unlock()
}
return true
})
}
type byRowKey []*row
@ -1176,6 +1207,14 @@ func (r *row) size() int {
return size
}
// Less implements btree.Item, ordering rows lexicographically by key.
func (r *row) Less(i btree.Item) bool {
	other := i.(*row)
	return r.key < other.key
}
// btreeKey wraps a row key in a throwaway *row value so it can be used
// as a lookup key into the table's BTree.
func btreeKey(s string) *row {
	return &row{key: s}
}
// String implements fmt.Stringer by returning the row's key.
func (r *row) String() string {
	return r.key
}

View File

@ -17,6 +17,7 @@ package bttest
import (
"fmt"
"math/rand"
"strconv"
"sync"
"sync/atomic"
"testing"
@ -26,7 +27,6 @@ import (
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
"google.golang.org/grpc"
"strconv"
)
func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
@ -271,7 +271,7 @@ func TestDropRowRange(t *testing.T) {
}
doWrite()
tblSize := len(tbl.rows)
tblSize := tbl.rows.Len()
req := &btapb.DropRowRangeRequest{
Name: tblInfo.Name,
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("AAA")},
@ -279,7 +279,7 @@ func TestDropRowRange(t *testing.T) {
if _, err = s.DropRowRange(ctx, req); err != nil {
t.Fatalf("Dropping first range: %v", err)
}
got, want := len(tbl.rows), tblSize-count
got, want := tbl.rows.Len(), tblSize-count
if got != want {
t.Errorf("Row count after first drop: got %d (%v), want %d", got, tbl.rows, want)
}
@ -291,7 +291,7 @@ func TestDropRowRange(t *testing.T) {
if _, err = s.DropRowRange(ctx, req); err != nil {
t.Fatalf("Dropping second range: %v", err)
}
got, want = len(tbl.rows), tblSize-(2*count)
got, want = tbl.rows.Len(), tblSize-(2*count)
if got != want {
t.Errorf("Row count after second drop: got %d (%v), want %d", got, tbl.rows, want)
}
@ -303,7 +303,7 @@ func TestDropRowRange(t *testing.T) {
if _, err = s.DropRowRange(ctx, req); err != nil {
t.Fatalf("Dropping invalid range: %v", err)
}
got, want = len(tbl.rows), tblSize-(2*count)
got, want = tbl.rows.Len(), tblSize-(2*count)
if got != want {
t.Errorf("Row count after invalid drop: got %d (%v), want %d", got, tbl.rows, want)
}
@ -315,7 +315,7 @@ func TestDropRowRange(t *testing.T) {
if _, err = s.DropRowRange(ctx, req); err != nil {
t.Fatalf("Dropping all data: %v", err)
}
got, want = len(tbl.rows), 0
got, want = tbl.rows.Len(), 0
if got != want {
t.Errorf("Row count after drop all: got %d, want %d", got, want)
}
@ -331,13 +331,13 @@ func TestDropRowRange(t *testing.T) {
if _, err = s.DropRowRange(ctx, req); err != nil {
t.Fatalf("Dropping all data: %v", err)
}
got, want = len(tbl.rows), 0
got, want = tbl.rows.Len(), 0
if got != want {
t.Errorf("Row count after drop all: got %d, want %d", got, want)
}
doWrite()
got, want = len(tbl.rows), len(prefixes)
got, want = tbl.rows.Len(), len(prefixes)
if got != want {
t.Errorf("Row count after rewrite: got %d, want %d", got, want)
}
@ -350,7 +350,7 @@ func TestDropRowRange(t *testing.T) {
t.Fatalf("Dropping range: %v", err)
}
doWrite()
got, want = len(tbl.rows), len(prefixes)
got, want = tbl.rows.Len(), len(prefixes)
if got != want {
t.Errorf("Row count after drop range: got %d, want %d", got, want)
}
@ -366,6 +366,56 @@ func (s *MockReadRowsServer) Send(resp *btpb.ReadRowsResponse) error {
return nil
}
// TestReadRows writes a single cell, then verifies that ReadRows returns
// exactly one response for several equivalent row sets: an explicit key,
// an unbounded range, and closed/open ranges that cover the key.
func TestReadRows(t *testing.T) {
	ctx := context.Background()
	s := &server{
		tables: make(map[string]*table),
	}
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
		},
	}
	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
	if err != nil {
		t.Fatalf("Creating table: %v", err)
	}
	// Populate one row ("row") with a single empty-valued cell.
	mreq := &btpb.MutateRowRequest{
		TableName: tblInfo.Name,
		RowKey:    []byte("row"),
		Mutations: []*btpb.Mutation{{
			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
				FamilyName:      "cf0",
				ColumnQualifier: []byte("col"),
				TimestampMicros: 1000,
				Value:           []byte{},
			}},
		}},
	}
	if _, err := s.MutateRow(ctx, mreq); err != nil {
		t.Fatalf("Populating table: %v", err)
	}
	// Each of these row sets should match the one row written above.
	for _, rowset := range []*btpb.RowSet{
		{RowKeys: [][]byte{[]byte("row")}},
		{RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{[]byte("")}}}},
		{RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{[]byte("r")}}}},
		{RowRanges: []*btpb.RowRange{{
			StartKey: &btpb.RowRange_StartKeyClosed{[]byte("")},
			EndKey:   &btpb.RowRange_EndKeyOpen{[]byte("s")},
		}}},
	} {
		mock := &MockReadRowsServer{}
		req := &btpb.ReadRowsRequest{TableName: tblInfo.Name, Rows: rowset}
		if err = s.ReadRows(req, mock); err != nil {
			t.Fatalf("ReadRows error: %v", err)
		}
		if got, want := len(mock.responses), 1; got != want {
			t.Errorf("%+v: response count: got %d, want %d", rowset, got, want)
		}
	}
}
func TestReadRowsOrder(t *testing.T) {
s := &server{
tables: make(map[string]*table),
@ -430,7 +480,7 @@ func TestReadRowsOrder(t *testing.T) {
t.Fatal("Response count: got 0, want > 0")
}
if len(mock.responses[0].Chunks) != 27 {
t.Fatal("Chunk count: got %d, want 27", len(mock.responses[0].Chunks))
t.Fatalf("Chunk count: got %d, want 27", len(mock.responses[0].Chunks))
}
testOrder := func(ms *MockReadRowsServer) {
var prevFam, prevCol string
@ -480,7 +530,7 @@ func TestReadRowsOrder(t *testing.T) {
t.Fatal("Response count: got 0, want > 0")
}
if len(mock.responses[0].Chunks) != 18 {
t.Fatal("Chunk count: got %d, want 18", len(mock.responses[0].Chunks))
t.Fatalf("Chunk count: got %d, want 18", len(mock.responses[0].Chunks))
}
testOrder(mock)
@ -511,7 +561,61 @@ func TestReadRowsOrder(t *testing.T) {
t.Fatal("Response count: got 0, want > 0")
}
if len(mock.responses[0].Chunks) != 30 {
t.Fatal("Chunk count: got %d, want 30", len(mock.responses[0].Chunks))
t.Fatalf("Chunk count: got %d, want 30", len(mock.responses[0].Chunks))
}
testOrder(mock)
}
// TestCheckAndMutateRowWithoutPredicate exercises CheckAndMutateRow when
// the request carries no predicate filter: PredicateMatched should report
// whether the addressed row exists.
func TestCheckAndMutateRowWithoutPredicate(t *testing.T) {
	s := &server{
		tables: make(map[string]*table),
	}
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
		},
	}
	tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
	if err != nil {
		t.Fatalf("Creating table: %v", err)
	}
	// Populate the table
	val := []byte("value")
	mrreq := &btpb.MutateRowRequest{
		TableName: tbl.Name,
		RowKey:    []byte("row-present"),
		Mutations: []*btpb.Mutation{{
			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
				FamilyName:      "cf",
				ColumnQualifier: []byte("col"),
				TimestampMicros: 0,
				Value:           val,
			}},
		}},
	}
	if _, err := s.MutateRow(ctx, mrreq); err != nil {
		t.Fatalf("Populating table: %v", err)
	}
	// Missing row: with no predicate, the match must be reported false.
	req := &btpb.CheckAndMutateRowRequest{
		TableName: tbl.Name,
		RowKey:    []byte("row-not-present"),
	}
	if res, err := s.CheckAndMutateRow(ctx, req); err != nil {
		t.Errorf("CheckAndMutateRow error: %v", err)
	} else if got, want := res.PredicateMatched, false; got != want {
		t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want)
	}
	// Existing row: the match must be reported true.
	req = &btpb.CheckAndMutateRowRequest{
		TableName: tbl.Name,
		RowKey:    []byte("row-present"),
	}
	if res, err := s.CheckAndMutateRow(ctx, req); err != nil {
		t.Errorf("CheckAndMutateRow error: %v", err)
	} else if got, want := res.PredicateMatched, true; got != want {
		t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want)
	}
}

View File

@ -34,9 +34,12 @@ import (
"text/template"
"time"
"encoding/csv"
"cloud.google.com/go/bigtable"
"cloud.google.com/go/bigtable/internal/cbtconfig"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
@ -64,7 +67,7 @@ func getCredentialOpts(opts []option.ClientOption) []option.ClientOption {
return opts
}
func getClient() *bigtable.Client {
func getClient(clientConf bigtable.ClientConfig) *bigtable.Client {
if client == nil {
var opts []option.ClientOption
if ep := config.DataEndpoint; ep != "" {
@ -72,7 +75,7 @@ func getClient() *bigtable.Client {
}
opts = getCredentialOpts(opts)
var err error
client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, opts...)
client, err = bigtable.NewClientWithConfig(context.Background(), config.Project, config.Instance, clientConf, opts...)
if err != nil {
log.Fatalf("Making bigtable.Client: %v", err)
}
@ -170,10 +173,15 @@ func init() {
}
tw.Flush()
buf.WriteString(configHelp)
buf.WriteString("\ncbt ` + version + ` ` + revision + ` ` + revisionDate + `")
cmdSummary = buf.String()
}
var configHelp = `
Alpha features are not currently available to most Cloud Bigtable customers. The
features might be changed in backward-incompatible ways and are not recommended
for production use. They are not subject to any SLA or deprecation policy.
For convenience, values of the -project, -instance, -creds,
-admin-endpoint and -data-endpoint flags may be specified in
` + cbtconfig.Filename() + ` in this format:
@ -183,8 +191,6 @@ For convenience, values of the -project, -instance, -creds,
admin-endpoint = hostname:port
data-endpoint = hostname:port
All values are optional, and all will be overridden by flags.
cbt ` + version + ` ` + revision + ` ` + revisionDate + `
`
var commands = []struct {
@ -200,6 +206,30 @@ var commands = []struct {
Usage: "cbt count <table>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "createinstance",
Desc: "Create an instance with an initial cluster",
do: doCreateInstance,
Usage: "cbt createinstance <instance-id> <display-name> <cluster-id> <zone> <num-nodes> <storage type>\n" +
" instance-id Permanent, unique id for the instance\n" +
" display-name Description of the instance\n" +
" cluster-id Permanent, unique id for the cluster in the instance\n" +
" zone The zone in which to create the cluster\n" +
" num-nodes The number of nodes to create\n" +
" storage-type SSD or HDD\n",
Required: cbtconfig.ProjectRequired,
},
{
Name: "createcluster",
Desc: "Create a cluster in the configured instance (replication alpha)",
do: doCreateCluster,
Usage: "cbt createcluster <cluster-id> <zone> <num-nodes> <storage type>\n" +
" cluster-id Permanent, unique id for the cluster in the instance\n" +
" zone The zone in which to create the cluster\n" +
" num-nodes The number of nodes to create\n" +
" storage-type SSD or HDD\n",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "createfamily",
Desc: "Create a column family",
@ -211,9 +241,41 @@ var commands = []struct {
Name: "createtable",
Desc: "Create a table",
do: doCreateTable,
Usage: "cbt createtable <table> [initial_splits...]\n" +
" initial_splits=row A row key to be used to initially split the table " +
"into multiple tablets. Can be repeated to create multiple splits.",
Usage: "cbt createtable <table> [families=family[:(maxage=<d> | maxversions=<n>)],...] [splits=split,...]\n" +
" families: Column families and their associated GC policies. See \"setgcpolicy\".\n" +
" Example: families=family1:maxage=1w,family2:maxversions=1\n" +
" splits: Row key to be used to initially split the table",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "updatecluster",
Desc: "Update a cluster in the configured instance",
do: doUpdateCluster,
Usage: "cbt updatecluster <cluster-id> [num-nodes=num-nodes]\n" +
" cluster-id Permanent, unique id for the cluster in the instance\n" +
" num-nodes The number of nodes to update to",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "deleteinstance",
Desc: "Deletes an instance",
do: doDeleteInstance,
Usage: "cbt deleteinstance <instance>",
Required: cbtconfig.ProjectRequired,
},
{
Name: "deletecluster",
Desc: "Deletes a cluster from the configured instance (replication alpha)",
do: doDeleteCluster,
Usage: "cbt deletecluster <cluster>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "deletecolumn",
Desc: "Delete all cells in a column",
do: doDeleteColumn,
Usage: "cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>]\n" +
" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
@ -224,10 +286,11 @@ var commands = []struct {
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "deleterow",
Desc: "Delete a row",
do: doDeleteRow,
Usage: "cbt deleterow <table> <row>",
Name: "deleterow",
Desc: "Delete a row",
do: doDeleteRow,
Usage: "cbt deleterow <table> <row> [app-profile=<app profile id>]\n" +
" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
@ -259,10 +322,18 @@ var commands = []struct {
Required: cbtconfig.ProjectRequired,
},
{
Name: "lookup",
Desc: "Read from a single row",
do: doLookup,
Usage: "cbt lookup <table> <row>",
Name: "listclusters",
Desc: "List instances in an instance",
do: doListClusters,
Usage: "cbt listclusters",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "lookup",
Desc: "Read from a single row",
do: doLookup,
Usage: "cbt lookup <table> <row> [app-profile=<app profile id>]\n" +
" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
@ -284,18 +355,22 @@ var commands = []struct {
Name: "read",
Desc: "Read rows",
do: doRead,
Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]\n" +
Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>]" +
" [regex=<regex>] [count=<n>] [app-profile=<app profile id>]\n" +
" start=<row> Start reading at this row\n" +
" end=<row> Stop reading before this row\n" +
" prefix=<prefix> Read rows with this prefix\n" +
" count=<n> Read only this many rows\n",
" regex=<regex> Read rows with keys matching this regex\n" +
" count=<n> Read only this many rows\n" +
" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "set",
Desc: "Set value of a cell",
do: doSet,
Usage: "cbt set <table> <row> family:column=val[@ts] ...\n" +
Usage: "cbt set <table> <row> [app-profile=<app profile id>] family:column=val[@ts] ...\n" +
" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n" +
" family:column=val[@ts] may be repeated to set multiple cells.\n" +
"\n" +
" ts is an optional integer timestamp.\n" +
@ -313,6 +388,53 @@ var commands = []struct {
" maxversions=<n> Maximum number of versions to preserve",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "waitforreplication",
Desc: "Blocks until all the completed writes have been replicated to all the clusters (replication alpha)",
do: doWaitForReplicaiton,
Usage: "cbt waitforreplication <table>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "createtablefromsnapshot",
Desc: "Create a table from a snapshot (snapshots alpha)",
do: doCreateTableFromSnapshot,
Usage: "cbt createtablefromsnapshot <table> <cluster> <snapshot>\n" +
" table The name of the table to create\n" +
" cluster The cluster where the snapshot is located\n" +
" snapshot The snapshot to restore",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "createsnapshot",
Desc: "Create a snapshot from a source table (snapshots alpha)",
do: doSnapshotTable,
Usage: "cbt createsnapshot <cluster> <snapshot> <table> [ttl=<d>]\n" +
"\n" +
` [ttl=<d>] Lifespan of the snapshot (e.g. "1h", "4d")` + "\n",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "listsnapshots",
Desc: "List snapshots in a cluster (snapshots alpha)",
do: doListSnapshots,
Usage: "cbt listsnapshots [<cluster>]",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "getsnapshot",
Desc: "Get snapshot info (snapshots alpha)",
do: doGetSnapshot,
Usage: "cbt getsnapshot <cluster> <snapshot>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "deletesnapshot",
Desc: "Delete snapshot in a cluster (snapshots alpha)",
do: doDeleteSnapshot,
Usage: "cbt deletesnapshot <cluster> <snapshot>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "version",
Desc: "Print the current cbt version",
@ -326,7 +448,7 @@ func doCount(ctx context.Context, args ...string) {
if len(args) != 1 {
log.Fatal("usage: cbt count <table>")
}
tbl := getClient().Open(args[0])
tbl := getClient(bigtable.ClientConfig{}).Open(args[0])
n := 0
err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool {
@ -339,6 +461,51 @@ func doCount(ctx context.Context, args ...string) {
fmt.Println(n)
}
// doCreateTable creates a table, optionally with column families (and
// per-family GC policies) and initial split keys, given as
// families=... and splits=... arguments.
func doCreateTable(ctx context.Context, args ...string) {
	if len(args) < 1 {
		log.Fatal("usage: cbt createtable <table> [families=family[:gcpolicy],...] [splits=split,...]")
	}

	conf := bigtable.TableConf{TableID: args[0]}
	for _, option := range args[1:] {
		parts := strings.SplitN(option, "=", 2)
		if len(parts) != 2 {
			log.Fatalf("Bad arg %q", option)
		}
		name, rawVal := parts[0], parts[1]
		// The value is a comma-separated list; parse it as one CSV record.
		fields, err := csv.NewReader(strings.NewReader(rawVal)).Read()
		if err != nil {
			log.Fatalf("Invalid families arg format: %v", err)
		}
		switch name {
		case "families":
			conf.Families = make(map[string]bigtable.GCPolicy)
			for _, spec := range fields {
				pieces := strings.Split(spec, ":")
				var policy bigtable.GCPolicy
				if len(pieces) >= 2 {
					policy, err = parseGCPolicy(pieces[1])
					if err != nil {
						log.Fatal(err)
					}
				} else {
					// No policy given; keep only the latest version.
					policy = bigtable.MaxVersionsPolicy(1)
					log.Printf("Using default GC Policy of %v for family %v", policy, spec)
				}
				conf.Families[pieces[0]] = policy
			}
		case "splits":
			conf.SplitKeys = fields
		default:
			log.Fatalf("Unknown arg key %q", name)
		}
	}
	if err := getAdminClient().CreateTableFromConf(ctx, &conf); err != nil {
		log.Fatalf("Creating table: %v", err)
	}
}
func doCreateFamily(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatal("usage: cbt createfamily <table> <family>")
@ -349,19 +516,135 @@ func doCreateFamily(ctx context.Context, args ...string) {
}
}
func doCreateTable(ctx context.Context, args ...string) {
if len(args) < 1 {
log.Fatal("usage: cbt createtable <table> [initial_splits...]")
}
var err error
if len(args) > 1 {
splits := args[1:]
err = getAdminClient().CreatePresplitTable(ctx, args[0], splits)
} else {
err = getAdminClient().CreateTable(ctx, args[0])
// doCreateInstance creates a new instance with one initial cluster from
// the positional args: instance-id, display-name, cluster-id, zone,
// num-nodes, storage type.
func doCreateInstance(ctx context.Context, args ...string) {
	if len(args) < 6 {
		log.Fatal("cbt createinstance <instance-id> <display-name> <cluster-id> <zone> <num-nodes> <storage type>")
	}

	nodes, err := strconv.ParseInt(args[4], 0, 32)
	if err != nil {
		log.Fatalf("Bad num-nodes %q: %v", args[4], err)
	}
	storage, err := parseStorageType(args[5])
	if err != nil {
		log.Fatal(err)
	}
	conf := bigtable.InstanceWithClustersConfig{
		InstanceID:  args[0],
		DisplayName: args[1],
		Clusters: []bigtable.ClusterConfig{{
			ClusterID:   args[2],
			Zone:        args[3],
			NumNodes:    int32(nodes),
			StorageType: storage,
		}},
	}
	if err = getInstanceAdminClient().CreateInstanceWithClusters(ctx, &conf); err != nil {
		log.Fatalf("Creating instance: %v", err)
	}
}
// doCreateCluster adds a cluster to the configured instance from the
// positional args: cluster-id, zone, num-nodes, storage type.
func doCreateCluster(ctx context.Context, args ...string) {
	if len(args) < 4 {
		log.Fatal("usage: cbt createcluster <cluster-id> <zone> <num-nodes> <storage type>")
	}
	numNodes, err := strconv.ParseInt(args[2], 0, 32)
	if err != nil {
		// The argument is spelled "num-nodes" in the usage text (and in
		// doCreateInstance); report it consistently, not as "num_nodes".
		log.Fatalf("Bad num-nodes %q: %v", args[2], err)
	}
	sType, err := parseStorageType(args[3])
	if err != nil {
		log.Fatal(err)
	}
	cc := bigtable.ClusterConfig{
		InstanceID:  config.Instance,
		ClusterID:   args[0],
		Zone:        args[1],
		NumNodes:    int32(numNodes),
		StorageType: sType,
	}
	if err := getInstanceAdminClient().CreateCluster(ctx, &cc); err != nil {
		log.Fatalf("Creating cluster: %v", err)
	}
}
// doUpdateCluster updates mutable properties of a cluster in the
// configured instance; currently only num-nodes is supported.
func doUpdateCluster(ctx context.Context, args ...string) {
	if len(args) < 2 {
		log.Fatal("cbt updatecluster <cluster-id> [num-nodes=num-nodes]")
	}
	var (
		nodes int64
		err   error
	)
	for _, opt := range args[1:] {
		eq := strings.Index(opt, "=")
		if eq < 0 {
			log.Fatalf("Bad arg %q", opt)
		}
		name, val := opt[:eq], opt[eq+1:]
		switch name {
		case "num-nodes":
			if nodes, err = strconv.ParseInt(val, 0, 32); err != nil {
				log.Fatalf("Bad num-nodes %q: %v", val, err)
			}
		default:
			log.Fatalf("Unknown arg key %q", name)
		}
	}
	// Nothing was requested; bail out rather than issue a no-op RPC.
	if nodes <= 0 {
		log.Fatal("Updating cluster: nothing to update")
	}
	if err = getInstanceAdminClient().UpdateCluster(ctx, config.Instance, args[0], int32(nodes)); err != nil {
		log.Fatalf("Updating cluster: %v", err)
	}
}
// doDeleteInstance permanently deletes the named instance.
func doDeleteInstance(ctx context.Context, args ...string) {
	if len(args) != 1 {
		log.Fatal("usage: cbt deleteinstance <instance>")
	}
	if err := getInstanceAdminClient().DeleteInstance(ctx, args[0]); err != nil {
		log.Fatalf("Deleting instance: %v", err)
	}
}
// doDeleteCluster removes the named cluster from the configured instance.
func doDeleteCluster(ctx context.Context, args ...string) {
	if len(args) != 1 {
		log.Fatal("usage: cbt deletecluster <cluster>")
	}
	if err := getInstanceAdminClient().DeleteCluster(ctx, config.Instance, args[0]); err != nil {
		log.Fatalf("Deleting cluster: %v", err)
	}
}
// doDeleteColumn deletes all cells in one column of one row.
// Args: table, row, family, column, plus an optional
// app-profile=<app profile id>.
func doDeleteColumn(ctx context.Context, args ...string) {
	usage := "usage: cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>]"
	// BUG fix: the original condition used ||, which is true for every
	// argument count, so the command always exited with the usage message.
	// Accept exactly 4 or 5 arguments.
	if len(args) != 4 && len(args) != 5 {
		log.Fatal(usage)
	}
	var appProfile string
	if len(args) == 5 {
		if !strings.HasPrefix(args[4], "app-profile=") {
			log.Fatal(usage)
		}
		appProfile = strings.Split(args[4], "=")[1]
	}
	tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(args[0])
	mut := bigtable.NewMutation()
	mut.DeleteCellsInColumn(args[2], args[3])
	if err := tbl.Apply(ctx, args[1], mut); err != nil {
		log.Fatalf("Deleting cells in column: %v", err)
	}
}
@ -376,10 +659,18 @@ func doDeleteFamily(ctx context.Context, args ...string) {
}
func doDeleteRow(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatal("usage: cbt deleterow <table> <row>")
usage := "usage: cbt deleterow <table> <row> [app-profile=<app profile id>]"
if len(args) != 2 || len(args) != 3 {
log.Fatal(usage)
}
tbl := getClient().Open(args[0])
var appProfile string
if len(args) == 3 {
if !strings.HasPrefix(args[2], "app-profile=") {
log.Fatal(usage)
}
appProfile = strings.Split(args[2], "=")[1]
}
tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(args[0])
mut := bigtable.NewMutation()
mut.DeleteRow()
if err := tbl.Apply(ctx, args[1], mut); err != nil {
@ -429,8 +720,9 @@ func docFlags() []*flag.Flag {
func doDocReal(ctx context.Context, args ...string) {
data := map[string]interface{}{
"Commands": commands,
"Flags": docFlags(),
"Commands": commands,
"Flags": docFlags(),
"ConfigHelp": configHelp,
}
var buf bytes.Buffer
if err := docTemplate.Execute(&buf, data); err != nil {
@ -474,7 +766,9 @@ var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{
//go:generate go run cbt.go -o cbtdoc.go doc
/*
Cbt is a tool for doing basic interactions with Cloud Bigtable.
Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
install the cbt tool, see the
[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview).
Usage:
@ -491,6 +785,8 @@ The options are:
-{{.Name}} string
{{.Usage}}{{end}}
{{.ConfigHelp}}
{{range .Commands}}
{{.Desc}}
@ -535,12 +831,37 @@ func doListInstances(ctx context.Context, args ...string) {
tw.Flush()
}
// doListClusters prints a table of the clusters in the configured
// instance: name, zone, state and serve-node count.
func doListClusters(ctx context.Context, args ...string) {
	if len(args) != 0 {
		// Fatal, not Fatalf: the message contains no format verbs.
		log.Fatal("usage: cbt listclusters")
	}
	cis, err := getInstanceAdminClient().Clusters(ctx, config.Instance)
	if err != nil {
		log.Fatalf("Getting list of clusters: %v", err)
	}
	tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
	fmt.Fprintf(tw, "Cluster Name\tZone\tState\n")
	fmt.Fprintf(tw, "------------\t----\t----\n")
	for _, ci := range cis {
		fmt.Fprintf(tw, "%s\t%s\t%s (%d serve nodes)\n", ci.Name, ci.Zone, ci.State, ci.ServeNodes)
	}
	tw.Flush()
}
func doLookup(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatalf("usage: cbt lookup <table> <row>")
if len(args) < 2 {
log.Fatalf("usage: cbt lookup <table> <row> [app-profile=<app profile id>]")
}
var appProfile string
if len(args) > 2 {
i := strings.Index(args[2], "=")
if i < 0 {
log.Fatalf("Bad arg %q", args[2])
}
appProfile = strings.Split(args[2], "=")[1]
}
table, row := args[0], args[1]
tbl := getClient().Open(table)
tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(table)
r, err := tbl.ReadRow(ctx, row)
if err != nil {
log.Fatalf("Reading row: %v", err)
@ -612,8 +933,9 @@ func doLS(ctx context.Context, args ...string) {
func doMDDocReal(ctx context.Context, args ...string) {
data := map[string]interface{}{
"Commands": commands,
"Flags": docFlags(),
"Commands": commands,
"Flags": docFlags(),
"ConfigHelp": configHelp,
}
var buf bytes.Buffer
if err := mddocTemplate.Execute(&buf, data); err != nil {
@ -643,6 +965,8 @@ The options are:
-{{.Name}} string
{{.Usage}}{{end}}
{{.ConfigHelp}}
{{range .Commands}}
## {{.Desc}}
@ -657,7 +981,6 @@ func doRead(ctx context.Context, args ...string) {
if len(args) < 1 {
log.Fatalf("usage: cbt read <table> [args ...]")
}
tbl := getClient().Open(args[0])
parsed := make(map[string]string)
for _, arg := range args[1:] {
@ -672,7 +995,7 @@ func doRead(ctx context.Context, args ...string) {
case "limit":
// Be nicer; we used to support this, but renamed it to "end".
log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
case "start", "end", "prefix", "count":
case "start", "end", "prefix", "count", "regex", "app-profile":
parsed[key] = val
}
}
@ -698,8 +1021,12 @@ func doRead(ctx context.Context, args ...string) {
}
opts = append(opts, bigtable.LimitRows(n))
}
if regex := parsed["regex"]; regex != "" {
opts = append(opts, bigtable.RowFilter(bigtable.RowKeyFilter(regex)))
}
// TODO(dsymonds): Support filters.
tbl := getClient(bigtable.ClientConfig{AppProfile: parsed["app-profile"]}).Open(args[0])
err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
printRow(r)
return true
@ -713,12 +1040,16 @@ var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`)
func doSet(ctx context.Context, args ...string) {
if len(args) < 3 {
log.Fatalf("usage: cbt set <table> <row> family:[column]=val[@ts] ...")
log.Fatalf("usage: cbt set <table> <row> [app-profile=<app profile id>] family:[column]=val[@ts] ...")
}
tbl := getClient().Open(args[0])
var appProfile string
row := args[1]
mut := bigtable.NewMutation()
for _, arg := range args[2:] {
if strings.HasPrefix(arg, "app-profile=") {
appProfile = strings.Split(arg, "=")[1]
continue
}
m := setArg.FindStringSubmatch(arg)
if m == nil {
log.Fatalf("Bad set arg %q", arg)
@ -735,6 +1066,7 @@ func doSet(ctx context.Context, args ...string) {
}
mut.Set(m[1], m[2], ts, []byte(val))
}
tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(args[0])
if err := tbl.Apply(ctx, row, mut); err != nil {
log.Fatalf("Applying mutation: %v", err)
}
@ -747,25 +1079,169 @@ func doSetGCPolicy(ctx context.Context, args ...string) {
table := args[0]
fam := args[1]
pol, err := parseGCPolicy(args[2])
if err != nil {
log.Fatal(err)
}
if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil {
log.Fatalf("Setting GC policy: %v", err)
}
}
// doWaitForReplicaiton blocks until all writes issued before now have been
// replicated to all clusters of the table's instance.
// NOTE(review): the function name misspells "Replication"; it is referenced
// from the command table above, so a rename must update both places.
func doWaitForReplicaiton(ctx context.Context, args ...string) {
	if len(args) != 1 {
		log.Fatalf("usage: cbt waitforreplication <table>")
	}
	table := args[0]

	fmt.Printf("Waiting for all writes up to %s to be replicated.\n", time.Now().Format("2006/01/02-15:04:05"))
	if err := getAdminClient().WaitForReplication(ctx, table); err != nil {
		log.Fatalf("Waiting for replication: %v", err)
	}
}
func parseGCPolicy(policyStr string) (bigtable.GCPolicy, error) {
var pol bigtable.GCPolicy
switch p := args[2]; {
switch p := policyStr; {
case strings.HasPrefix(p, "maxage="):
d, err := parseDuration(p[7:])
if err != nil {
log.Fatal(err)
return nil, err
}
pol = bigtable.MaxAgePolicy(d)
case strings.HasPrefix(p, "maxversions="):
n, err := strconv.ParseUint(p[12:], 10, 16)
if err != nil {
log.Fatal(err)
return nil, err
}
pol = bigtable.MaxVersionsPolicy(int(n))
default:
log.Fatalf("Bad GC policy %q", p)
return nil, fmt.Errorf("Bad GC policy %q", p)
}
if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil {
log.Fatalf("Setting GC policy: %v", err)
return pol, nil
}
// parseStorageType maps the strings "SSD" and "HDD" to their
// bigtable.StorageType values; any other input yields an error.
func parseStorageType(storageTypeStr string) (bigtable.StorageType, error) {
	switch storageTypeStr {
	case "SSD":
		return bigtable.SSD, nil
	case "HDD":
		return bigtable.HDD, nil
	default:
		return -1, fmt.Errorf("Invalid storage type: %v, must be SSD or HDD", storageTypeStr)
	}
}
// doCreateTableFromSnapshot implements the "createtablefromsnapshot" command,
// restoring the named snapshot from a cluster into a new table.
func doCreateTableFromSnapshot(ctx context.Context, args ...string) {
	if len(args) != 3 {
		log.Fatal("usage: cbt createtablefromsnapshot <table> <cluster> <snapshot>")
	}
	table, cluster, snapshot := args[0], args[1], args[2]
	if err := getAdminClient().CreateTableFromSnapshot(ctx, table, cluster, snapshot); err != nil {
		log.Fatalf("Creating table: %v", err)
	}
}
// doSnapshotTable implements the "createsnapshot" command: it snapshots a
// table into a cluster, with an optional "ttl=<d>" argument overriding
// bigtable.DefaultSnapshotDuration.
func doSnapshotTable(ctx context.Context, args ...string) {
	if len(args) != 3 && len(args) != 4 {
		log.Fatal("usage: cbt createsnapshot <cluster> <snapshot> <table> [ttl=<d>]")
	}
	clusterID, snapshotID, tableID := args[0], args[1], args[2]

	ttl := bigtable.DefaultSnapshotDuration
	for _, arg := range args[3:] {
		kv := strings.SplitN(arg, "=", 2)
		if len(kv) != 2 {
			log.Fatalf("Bad arg %q", arg)
		}
		if kv[0] != "ttl" {
			log.Fatalf("Unknown arg key %q", kv[0])
		}
		d, err := parseDuration(kv[1])
		if err != nil {
			log.Fatalf("Invalid snapshot ttl value %q: %v", kv[1], err)
		}
		ttl = d
	}

	if err := getAdminClient().SnapshotTable(ctx, tableID, clusterID, snapshotID, ttl); err != nil {
		log.Fatalf("Failed to create Snapshot: %v", err)
	}
}
// doListSnapshots implements the "listsnapshots" command, printing a tab-aligned
// table of snapshots for one cluster, or for all clusters when none is given.
func doListSnapshots(ctx context.Context, args ...string) {
	if len(args) > 1 {
		log.Fatal("usage: cbt listsnapshots [<cluster>]")
	}
	// When no cluster is specified, pass the "-" sentinel instead.
	cluster := "-"
	if len(args) == 1 {
		cluster = args[0]
	}

	it := getAdminClient().ListSnapshots(ctx, cluster)

	w := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
	fmt.Fprintf(w, "Snapshot\tSource Table\tCreated At\tExpires At\n")
	fmt.Fprintf(w, "--------\t------------\t----------\t----------\n")
	const timeLayout = "2006-01-02 15:04 MST"
	for {
		s, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("Failed to fetch snapshots %v", err)
		}
		fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", s.Name, s.SourceTable, s.CreateTime.Format(timeLayout), s.DeleteTime.Format(timeLayout))
	}
	w.Flush()
}
// doGetSnapshot implements the "getsnapshot" command, printing the name,
// source table, creation time, and expiry time of one snapshot.
func doGetSnapshot(ctx context.Context, args ...string) {
	if len(args) != 2 {
		log.Fatalf("usage: cbt getsnapshot <cluster> <snapshot>")
	}
	clusterID, snapshotID := args[0], args[1]

	info, err := getAdminClient().SnapshotInfo(ctx, clusterID, snapshotID)
	if err != nil {
		log.Fatalf("Failed to get snapshot: %v", err)
	}

	const timeLayout = "2006-01-02 15:04 MST"
	fmt.Printf("Name: %s\n", info.Name)
	fmt.Printf("Source table: %s\n", info.SourceTable)
	fmt.Printf("Created at: %s\n", info.CreateTime.Format(timeLayout))
	fmt.Printf("Expires at: %s\n", info.DeleteTime.Format(timeLayout))
}
// doDeleteSnapshot implements the "deletesnapshot" command, removing one
// snapshot from a cluster.
func doDeleteSnapshot(ctx context.Context, args ...string) {
	if len(args) != 2 {
		log.Fatal("usage: cbt deletesnapshot <cluster> <snapshot>")
	}
	clusterID, snapshotID := args[0], args[1]
	if err := getAdminClient().DeleteSnapshot(ctx, clusterID, snapshotID); err != nil {
		log.Fatalf("Failed to delete snapshot: %v", err)
	}
}

View File

@ -17,7 +17,9 @@
//go:generate go run cbt.go -o cbtdoc.go doc
/*
Cbt is a tool for doing basic interactions with Cloud Bigtable.
Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
install the cbt tool, see the
[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview).
Usage:
@ -26,33 +28,58 @@ Usage:
The commands are:
count Count rows in a table
createinstance Create an instance with an initial cluster
createcluster Create a cluster in the configured instance (replication alpha)
createfamily Create a column family
createtable Create a table
updatecluster Update a cluster in the configured instance
deleteinstance Deletes an instance
deletecluster Deletes a cluster from the configured instance (replication alpha)
deletecolumn Delete all cells in a column
deletefamily Delete a column family
deleterow Delete a row
deletetable Delete a table
doc Print godoc-suitable documentation for cbt
help Print help text
listinstances List instances in a project
  listclusters              List clusters in an instance
lookup Read from a single row
ls List tables and column families
mddoc Print documentation for cbt in Markdown format
read Read rows
set Set value of a cell
setgcpolicy Set the GC policy for a column family
waitforreplication Blocks until all the completed writes have been replicated to all the clusters (replication alpha)
version Print the current cbt version
Use "cbt help <command>" for more information about a command.
The options are:
-project string
project ID
project ID, if unset uses gcloud configured project
-instance string
Cloud Bigtable instance
-creds string
if set, use application credentials in this file
Alpha features are not currently available to most Cloud Bigtable customers. The
features might be changed in backward-incompatible ways and are not recommended
for production use. They are not subject to any SLA or deprecation policy.
For convenience, values of the -project, -instance, -creds,
-admin-endpoint and -data-endpoint flags may be specified in
~/.cbtrc in this format:
project = my-project-123
instance = my-instance
creds = path-to-account-key.json
admin-endpoint = hostname:port
data-endpoint = hostname:port
All values are optional, and all will be overridden by flags.
Count rows in a table
Usage:
@ -61,6 +88,34 @@ Usage:
Create an instance with an initial cluster
Usage:
cbt createinstance <instance-id> <display-name> <cluster-id> <zone> <num-nodes> <storage type>
instance-id Permanent, unique id for the instance
display-name Description of the instance
cluster-id Permanent, unique id for the cluster in the instance
zone The zone in which to create the cluster
num-nodes The number of nodes to create
storage-type SSD or HDD
Create a cluster in the configured instance (replication alpha)
Usage:
cbt createcluster <cluster-id> <zone> <num-nodes> <storage type>
cluster-id Permanent, unique id for the cluster in the instance
zone The zone in which to create the cluster
num-nodes The number of nodes to create
storage-type SSD or HDD
Create a column family
Usage:
@ -72,7 +127,46 @@ Usage:
Create a table
Usage:
cbt createtable <table>
cbt createtable <table> [families=family[:(maxage=<d> | maxversions=<n>)],...] [splits=split,...]
families: Column families and their associated GC policies. See "setgcpolicy".
Example: families=family1:maxage=1w,family2:maxversions=1
splits: Row key to be used to initially split the table
Update a cluster in the configured instance
Usage:
cbt updatecluster <cluster-id> [num-nodes=num-nodes]
cluster-id Permanent, unique id for the cluster in the instance
num-nodes The number of nodes to update to
Deletes an instance
Usage:
cbt deleteinstance <instance>
Deletes a cluster from the configured instance (replication alpha)
Usage:
cbt deletecluster <cluster>
Delete all cells in a column
Usage:
cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>]
app-profile=<app profile id> The app profile id to use for the request (replication alpha)
@ -88,7 +182,9 @@ Usage:
Delete a row
Usage:
cbt deleterow <table> <row>
cbt deleterow <table> <row> [app-profile=<app profile id>]
app-profile=<app profile id> The app profile id to use for the request (replication alpha)
@ -125,10 +221,20 @@ Usage:
List clusters in an instance
Usage:
cbt listclusters
Read from a single row
Usage:
cbt lookup <table> <row>
cbt lookup <table> <row> [app-profile=<app profile id>]
app-profile=<app profile id> The app profile id to use for the request (replication alpha)
@ -153,11 +259,13 @@ Usage:
Read rows
Usage:
cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]
cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>] [app-profile=<app profile id>]
start=<row> Start reading at this row
end=<row> Stop reading before this row
prefix=<prefix> Read rows with this prefix
regex=<regex> Read rows with keys matching this regex
count=<n> Read only this many rows
app-profile=<app profile id> The app profile id to use for the request (replication alpha)
@ -166,7 +274,8 @@ Usage:
Set value of a cell
Usage:
cbt set <table> <row> family:column=val[@ts] ...
cbt set <table> <row> [app-profile=<app profile id>] family:column=val[@ts] ...
app-profile=<app profile id> The app profile id to use for the request (replication alpha)
family:column=val[@ts] may be repeated to set multiple cells.
ts is an optional integer timestamp.
@ -187,5 +296,21 @@ Usage:
Blocks until all the completed writes have been replicated to all the clusters (replication alpha)
Usage:
cbt waitforreplication <table>
Print the current cbt version
Usage:
cbt version
*/
package main

View File

@ -107,11 +107,12 @@ func main() {
// Create a scratch table.
log.Printf("Setting up scratch table...")
if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil {
log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
tblConf := bigtable.TableConf{
TableID: *scratchTable,
Families: map[string]bigtable.GCPolicy{"f": bigtable.MaxVersionsPolicy(1)},
}
if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil {
log.Fatalf("Making scratch table column family: %v", err)
if err := adminClient.CreateTableFromConf(context.Background(), &tblConf); err != nil {
log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
}
// Upon a successful run, delete the table. Don't bother checking for errors.
defer adminClient.DeleteTable(context.Background(), *scratchTable)

View File

@ -98,7 +98,7 @@ type EmulatedEnv struct {
// NewEmulatedEnv builds and starts the emulator based environment
func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) {
srv, err := bttest.NewServer("127.0.0.1:0", grpc.MaxRecvMsgSize(200<<20), grpc.MaxSendMsgSize(100<<20))
srv, err := bttest.NewServer("localhost:0", grpc.MaxRecvMsgSize(200<<20), grpc.MaxSendMsgSize(100<<20))
if err != nil {
return nil, err
}

View File

@ -181,7 +181,7 @@ type timestampRangeFilter struct {
}
func (trf timestampRangeFilter) String() string {
return fmt.Sprintf("timestamp_range(%s,%s)", trf.startTime, trf.endTime)
return fmt.Sprintf("timestamp_range(%v,%v)", trf.startTime, trf.endTime)
}
func (trf timestampRangeFilter) proto() *btpb.RowFilter {

View File

@ -135,24 +135,24 @@ func GCRuleToString(rule *bttdpb.GcRule) string {
if rule == nil {
return "<default>"
}
var ruleStr string
if r, ok := rule.Rule.(*bttdpb.GcRule_MaxNumVersions); ok {
ruleStr += MaxVersionsPolicy(int(r.MaxNumVersions)).String()
} else if r, ok := rule.Rule.(*bttdpb.GcRule_MaxAge); ok {
ruleStr += MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String()
} else if r, ok := rule.Rule.(*bttdpb.GcRule_Intersection_); ok {
var chunks []string
for _, intRule := range r.Intersection.Rules {
chunks = append(chunks, GCRuleToString(intRule))
}
ruleStr += "(" + strings.Join(chunks, " && ") + ")"
} else if r, ok := rule.Rule.(*bttdpb.GcRule_Union_); ok {
var chunks []string
for _, unionRule := range r.Union.Rules {
chunks = append(chunks, GCRuleToString(unionRule))
}
ruleStr += "(" + strings.Join(chunks, " || ") + ")"
switch r := rule.Rule.(type) {
case *bttdpb.GcRule_MaxNumVersions:
return MaxVersionsPolicy(int(r.MaxNumVersions)).String()
case *bttdpb.GcRule_MaxAge:
return MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String()
case *bttdpb.GcRule_Intersection_:
return joinRules(r.Intersection.Rules, " && ")
case *bttdpb.GcRule_Union_:
return joinRules(r.Union.Rules, " || ")
default:
return ""
}
return ruleStr
}
// joinRules renders each GC sub-rule with GCRuleToString and joins them with
// sep (" && " for intersections, " || " for unions), wrapped in parentheses.
func joinRules(rules []*bttdpb.GcRule, sep string) string {
	// Pre-size the slice: the final length is known, so avoid append growth.
	chunks := make([]string, 0, len(rules))
	for _, r := range rules {
		chunks = append(chunks, GCRuleToString(r))
	}
	return "(" + strings.Join(chunks, sep) + ")"
}

View File

@ -20,10 +20,11 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"reflect"
"strings"
"testing"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/wrappers"
btspb "google.golang.org/genproto/googleapis/bigtable/v2"
@ -48,7 +49,7 @@ func TestSingleCell(t *testing.T) {
t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"]))
}
want := []ReadItem{ri("rk", "fm", "col", 1, "value")}
if !reflect.DeepEqual(row["fm"], want) {
if !testutil.Equal(row["fm"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want)
}
if err := cr.Close(); err != nil {
@ -76,14 +77,14 @@ func TestMultipleCells(t *testing.T) {
ri("rs", "fm1", "col1", 1, "val2"),
ri("rs", "fm1", "col2", 0, "val3"),
}
if !reflect.DeepEqual(row["fm1"], want) {
if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}
want = []ReadItem{
ri("rs", "fm2", "col1", 0, "val4"),
ri("rs", "fm2", "col2", 1, "extralongval5"),
}
if !reflect.DeepEqual(row["fm2"], want) {
if !testutil.Equal(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
}
if err := cr.Close(); err != nil {
@ -108,7 +109,7 @@ func TestSplitCells(t *testing.T) {
ri("rs", "fm1", "col1", 0, "hello world"),
ri("rs", "fm1", "col2", 0, "val2"),
}
if !reflect.DeepEqual(row["fm1"], want) {
if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}
if err := cr.Close(); err != nil {
@ -124,7 +125,7 @@ func TestMultipleRows(t *testing.T) {
t.Fatalf("Processing chunk: %v", err)
}
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) {
if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}
@ -133,7 +134,7 @@ func TestMultipleRows(t *testing.T) {
t.Fatalf("Processing chunk: %v", err)
}
want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
if !reflect.DeepEqual(row["fm2"], want) {
if !testutil.Equal(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
}
@ -150,7 +151,7 @@ func TestBlankQualifier(t *testing.T) {
t.Fatalf("Processing chunk: %v", err)
}
want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) {
if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}
@ -159,7 +160,7 @@ func TestBlankQualifier(t *testing.T) {
t.Fatalf("Processing chunk: %v", err)
}
want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
if !reflect.DeepEqual(row["fm2"], want) {
if !testutil.Equal(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
}
@ -177,7 +178,7 @@ func TestReset(t *testing.T) {
cr.Process(ccReset())
row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true))
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) {
if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want)
}
if err := cr.Close(); err != nil {
@ -279,7 +280,7 @@ func runTestCase(t *testing.T, test TestCase) {
got := toSet(results)
want := toSet(test.Results)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want)
}
}

View File

@ -16,13 +16,14 @@ limitations under the License.
package bigtable
import (
"reflect"
"strings"
"testing"
"time"
"cloud.google.com/go/bigtable/bttest"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/google/go-cmp/cmp"
"golang.org/x/net/context"
"google.golang.org/api/option"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
@ -32,7 +33,7 @@ import (
)
func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) {
srv, err := bttest.NewServer("127.0.0.1:0", opt...)
srv, err := bttest.NewServer("localhost:0", opt...)
if err != nil {
return nil, nil, err
}
@ -231,13 +232,13 @@ func TestRetryApplyBulk(t *testing.T) {
}
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut})
if err != nil {
t.Errorf("unretryable errors: request failed %v")
t.Errorf("unretryable errors: request failed %v", err)
}
want := []error{
grpc.Errorf(codes.FailedPrecondition, ""),
grpc.Errorf(codes.Aborted, ""),
}
if !reflect.DeepEqual(want, errors) {
if !testutil.Equal(want, errors) {
t.Errorf("unretryable errors: got: %v, want: %v", errors, want)
}
@ -273,7 +274,7 @@ func TestRetainRowsAfter(t *testing.T) {
prevRowKey := "m"
want := NewRange("m\x00", "z")
got := prevRowRange.retainRowsAfter(prevRowKey)
if !reflect.DeepEqual(want, got) {
if !testutil.Equal(want, got, cmp.AllowUnexported(RowRange{})) {
t.Errorf("range retry: got %v, want %v", got, want)
}
@ -281,7 +282,7 @@ func TestRetainRowsAfter(t *testing.T) {
prevRowKey = "f"
wantRowRangeList := RowRangeList{NewRange("f\x00", "g"), NewRange("h", "l")}
got = prevRowRangeList.retainRowsAfter(prevRowKey)
if !reflect.DeepEqual(wantRowRangeList, got) {
if !testutil.Equal(wantRowRangeList, got, cmp.AllowUnexported(RowRange{})) {
t.Errorf("range list retry: got %v, want %v", got, wantRowRangeList)
}
@ -289,7 +290,7 @@ func TestRetainRowsAfter(t *testing.T) {
prevRowKey = "b"
wantList := RowList{"c", "d", "e", "f"}
got = prevRowList.retainRowsAfter(prevRowKey)
if !reflect.DeepEqual(wantList, got) {
if !testutil.Equal(wantList, got) {
t.Errorf("list retry: got %v, want %v", got, wantList)
}
}
@ -351,7 +352,7 @@ func TestRetryReadRows(t *testing.T) {
return true
})
want := []string{"a", "b", "c", "d"}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("retry range integration: got %v, want %v", got, want)
}
}

View File

@ -15,9 +15,9 @@
package breakpoints
import (
"reflect"
"testing"
"cloud.google.com/go/internal/testutil"
"golang.org/x/debug"
cd "google.golang.org/api/clouddebugger/v2"
)
@ -63,7 +63,7 @@ func TestBreakpointStore(t *testing.T) {
p := &Program{breakpointPCs: make(map[uint64]bool)}
bs := NewBreakpointStore(p)
checkPCs := func(expected map[uint64]bool) {
if !reflect.DeepEqual(p.breakpointPCs, expected) {
if !testutil.Equal(p.breakpointPCs, expected) {
t.Errorf("got breakpoint map %v want %v", p.breakpointPCs, expected)
}
}
@ -83,7 +83,7 @@ func TestBreakpointStore(t *testing.T) {
{testPC3, []*cd.Breakpoint{testBP2}},
{testLogPC, []*cd.Breakpoint{testLogBP}},
} {
if bps := bs.BreakpointsAtPC(test.pc); !reflect.DeepEqual(bps, test.expected) {
if bps := bs.BreakpointsAtPC(test.pc); !testutil.Equal(bps, test.expected) {
t.Errorf("BreakpointsAtPC(%x): got %v want %v", test.pc, bps, test.expected)
}
}

View File

@ -16,9 +16,9 @@ package valuecollector
import (
"fmt"
"reflect"
"testing"
"cloud.google.com/go/internal/testutil"
"golang.org/x/debug"
cd "google.golang.org/api/clouddebugger/v2"
)
@ -66,7 +66,7 @@ func TestValueCollector(t *testing.T) {
}
for i, v := range variablesToAdd {
added := c.AddVariable(v)
if !reflect.DeepEqual(added, expectedResults[i]) {
if !testutil.Equal(added, expectedResults[i]) {
t.Errorf("AddVariable: got %+v want %+v", *added, *expectedResults[i])
}
}
@ -162,11 +162,11 @@ func TestValueCollector(t *testing.T) {
&cd.Variable{Value: "1404"},
&cd.Variable{Value: "2400"},
}
if !reflect.DeepEqual(v, expectedValues) {
if !testutil.Equal(v, expectedValues) {
t.Errorf("ReadValues: got %v want %v", v, expectedValues)
// Do element-by-element comparisons, for more useful error messages.
for i := range v {
if i < len(expectedValues) && !reflect.DeepEqual(v[i], expectedValues[i]) {
if i < len(expectedValues) && !testutil.Equal(v[i], expectedValues[i]) {
t.Errorf("element %d: got %+v want %+v", i, *v[i], *expectedValues[i])
}
}

View File

@ -0,0 +1,68 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package container
import (
containerpb "google.golang.org/genproto/googleapis/container/v1"
)
import (
"fmt"
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
// TestClusterManagerSmoke is a generated smoke test: it constructs a real
// ClusterManagerClient and issues a single ListClusters call to verify
// end-to-end connectivity. It runs only when integration credentials are
// available and -short is not set.
func TestClusterManagerSmoke(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping smoke test in short mode")
	}
	ctx := context.Background()
	// TokenSource returns nil when integration credentials are not configured.
	ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	projectId := testutil.ProjID()
	_ = projectId
	c, err := NewClusterManagerClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		t.Fatal(err)
	}
	// Generated request scaffolding: list clusters in one fixed zone.
	var projectId2 string = projectId
	var zone string = "us-central1-a"
	var request = &containerpb.ListClustersRequest{
		ProjectId: projectId2,
		Zone:      zone,
	}
	if _, err := c.ListClusters(ctx, request); err != nil {
		t.Error(err)
	}
}

View File

@ -0,0 +1,674 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package container
import (
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
containerpb "google.golang.org/genproto/googleapis/container/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// ClusterManagerCallOptions contains the retry settings for each method of ClusterManagerClient.
// Each field holds the gax.CallOption slice applied to the correspondingly
// named RPC; per-call options supplied by callers are appended after these.
type ClusterManagerCallOptions struct {
	ListClusters            []gax.CallOption
	GetCluster              []gax.CallOption
	CreateCluster           []gax.CallOption
	UpdateCluster           []gax.CallOption
	UpdateNodePool          []gax.CallOption
	SetNodePoolAutoscaling  []gax.CallOption
	SetLoggingService       []gax.CallOption
	SetMonitoringService    []gax.CallOption
	SetAddonsConfig         []gax.CallOption
	SetLocations            []gax.CallOption
	UpdateMaster            []gax.CallOption
	SetMasterAuth           []gax.CallOption
	DeleteCluster           []gax.CallOption
	ListOperations          []gax.CallOption
	GetOperation            []gax.CallOption
	CancelOperation         []gax.CallOption
	GetServerConfig         []gax.CallOption
	ListNodePools           []gax.CallOption
	GetNodePool             []gax.CallOption
	CreateNodePool          []gax.CallOption
	DeleteNodePool          []gax.CallOption
	RollbackNodePoolUpgrade []gax.CallOption
	SetNodePoolManagement   []gax.CallOption
	SetLabels               []gax.CallOption
	SetLegacyAbac           []gax.CallOption
	StartIPRotation         []gax.CallOption
	CompleteIPRotation      []gax.CallOption
	SetNodePoolSize         []gax.CallOption
	SetNetworkPolicy        []gax.CallOption
	SetMaintenancePolicy    []gax.CallOption
}
// defaultClusterManagerClientOptions returns the base dial options: the
// production endpoint plus the service's default OAuth scopes.
func defaultClusterManagerClientOptions() []option.ClientOption {
	endpoint := option.WithEndpoint("container.googleapis.com:443")
	scopes := option.WithScopes(DefaultAuthScopes()...)
	return []option.ClientOption{endpoint, scopes}
}
// defaultClusterManagerCallOptions returns the per-method call options.
// Idempotent methods retry on DeadlineExceeded/Unavailable with exponential
// backoff; non-idempotent methods carry no retry options.
func defaultClusterManagerCallOptions() *ClusterManagerCallOptions {
	idempotent := []gax.CallOption{
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{
				codes.DeadlineExceeded,
				codes.Unavailable,
			}, gax.Backoff{
				Initial:    100 * time.Millisecond,
				Max:        60000 * time.Millisecond,
				Multiplier: 1.3,
			})
		}),
	}
	// In the original lookup table, non-idempotent methods had no entry and
	// resolved to a nil slice; keep that exact behavior.
	var nonIdempotent []gax.CallOption
	return &ClusterManagerCallOptions{
		ListClusters:            idempotent,
		GetCluster:              idempotent,
		CreateCluster:           nonIdempotent,
		UpdateCluster:           nonIdempotent,
		UpdateNodePool:          nonIdempotent,
		SetNodePoolAutoscaling:  nonIdempotent,
		SetLoggingService:       nonIdempotent,
		SetMonitoringService:    nonIdempotent,
		SetAddonsConfig:         nonIdempotent,
		SetLocations:            nonIdempotent,
		UpdateMaster:            nonIdempotent,
		SetMasterAuth:           nonIdempotent,
		DeleteCluster:           idempotent,
		ListOperations:          idempotent,
		GetOperation:            idempotent,
		CancelOperation:         nonIdempotent,
		GetServerConfig:         idempotent,
		ListNodePools:           idempotent,
		GetNodePool:             idempotent,
		CreateNodePool:          nonIdempotent,
		DeleteNodePool:          idempotent,
		RollbackNodePoolUpgrade: nonIdempotent,
		SetNodePoolManagement:   nonIdempotent,
		SetLabels:               nonIdempotent,
		SetLegacyAbac:           nonIdempotent,
		StartIPRotation:         nonIdempotent,
		CompleteIPRotation:      nonIdempotent,
		SetNodePoolSize:         nonIdempotent,
		SetNetworkPolicy:        nonIdempotent,
		SetMaintenancePolicy:    nonIdempotent,
	}
}
// ClusterManagerClient is a client for interacting with Google Container Engine API.
type ClusterManagerClient struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	clusterManagerClient containerpb.ClusterManagerClient

	// The call options for this service.
	CallOptions *ClusterManagerCallOptions

	// The x-goog-* metadata to be sent with each request.
	// Populated by setGoogleClientInfo and attached to every RPC context
	// via insertMetadata.
	xGoogMetadata metadata.MD
}
// NewClusterManagerClient creates a new cluster manager client.
//
// Google Container Engine Cluster Manager v1
func NewClusterManagerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterManagerClient, error) {
	// Caller-supplied options come last so they can override the defaults.
	allOpts := append(defaultClusterManagerClientOptions(), opts...)
	conn, err := transport.DialGRPC(ctx, allOpts...)
	if err != nil {
		return nil, err
	}
	client := &ClusterManagerClient{
		conn:                 conn,
		CallOptions:          defaultClusterManagerCallOptions(),
		clusterManagerClient: containerpb.NewClusterManagerClient(conn),
	}
	client.setGoogleClientInfo()
	return client, nil
}
// Connection returns the client's connection to the API service.
// It is the same *grpc.ClientConn used by all of the client's RPC methods.
func (c *ClusterManagerClient) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
// This closes the single gRPC connection underlying every method of the client.
func (c *ClusterManagerClient) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *ClusterManagerClient) setGoogleClientInfo(keyval ...string) {
	pairs := []string{"gl-go", version.Go()}
	pairs = append(pairs, keyval...)
	pairs = append(pairs, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(pairs...))
}
// ListClusters lists all clusters owned by a project in either the specified zone or all
// zones.
func (c *ClusterManagerClient) ListClusters(ctx context.Context, req *containerpb.ListClustersRequest, opts ...gax.CallOption) (*containerpb.ListClustersResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// The full-slice expression pins capacity so appending caller options
	// cannot clobber the shared default slice.
	defaults := c.CallOptions.ListClusters
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.ListClustersResponse
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.ListClusters(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// GetCluster gets the details of a specific cluster.
func (c *ClusterManagerClient) GetCluster(ctx context.Context, req *containerpb.GetClusterRequest, opts ...gax.CallOption) (*containerpb.Cluster, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Pin capacity so the append cannot mutate the shared defaults.
	defaults := c.CallOptions.GetCluster
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Cluster
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.GetCluster(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// CreateCluster creates a cluster, consisting of the specified number and type of Google
// Compute Engine instances.
//
// By default, the cluster is created in the project's
// default network (at /compute/docs/networks-and-firewalls#networks).
//
// One firewall is added for the cluster. After cluster creation,
// the cluster creates routes for each node to allow the containers
// on that node to communicate with all other instances in the
// cluster.
//
// Finally, an entry is added to the project's global metadata indicating
// which CIDR range is being used by the cluster.
func (c *ClusterManagerClient) CreateCluster(ctx context.Context, req *containerpb.CreateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Pin capacity so the append cannot mutate the shared defaults.
	defaults := c.CallOptions.CreateCluster
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.CreateCluster(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// UpdateCluster updates the settings of a specific cluster.
func (c *ClusterManagerClient) UpdateCluster(ctx context.Context, req *containerpb.UpdateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Pin capacity so the append cannot mutate the shared defaults.
	defaults := c.CallOptions.UpdateCluster
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.UpdateCluster(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// UpdateNodePool updates the version and/or image type of a specific node pool.
func (c *ClusterManagerClient) UpdateNodePool(ctx context.Context, req *containerpb.UpdateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Pin capacity so the append cannot mutate the shared defaults.
	defaults := c.CallOptions.UpdateNodePool
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.UpdateNodePool(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// SetNodePoolAutoscaling sets the autoscaling settings of a specific node pool.
func (c *ClusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, req *containerpb.SetNodePoolAutoscalingRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Pin capacity so the append cannot mutate the shared defaults.
	defaults := c.CallOptions.SetNodePoolAutoscaling
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetNodePoolAutoscaling(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// SetLoggingService sets the logging service of a specific cluster.
func (c *ClusterManagerClient) SetLoggingService(ctx context.Context, req *containerpb.SetLoggingServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.SetLoggingService[0:len(c.CallOptions.SetLoggingService):len(c.CallOptions.SetLoggingService)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetLoggingService(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// SetMonitoringService sets the monitoring service of a specific cluster.
func (c *ClusterManagerClient) SetMonitoringService(ctx context.Context, req *containerpb.SetMonitoringServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.SetMonitoringService[0:len(c.CallOptions.SetMonitoringService):len(c.CallOptions.SetMonitoringService)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetMonitoringService(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// SetAddonsConfig sets the addons of a specific cluster.
func (c *ClusterManagerClient) SetAddonsConfig(ctx context.Context, req *containerpb.SetAddonsConfigRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.SetAddonsConfig[0:len(c.CallOptions.SetAddonsConfig):len(c.CallOptions.SetAddonsConfig)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetAddonsConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// SetLocations sets the locations of a specific cluster.
func (c *ClusterManagerClient) SetLocations(ctx context.Context, req *containerpb.SetLocationsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.SetLocations[0:len(c.CallOptions.SetLocations):len(c.CallOptions.SetLocations)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetLocations(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// UpdateMaster updates the master of a specific cluster.
func (c *ClusterManagerClient) UpdateMaster(ctx context.Context, req *containerpb.UpdateMasterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.UpdateMaster[0:len(c.CallOptions.UpdateMaster):len(c.CallOptions.UpdateMaster)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.UpdateMaster(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// SetMasterAuth is used to set master auth materials. Currently it supports
// changing the admin password of a specific cluster, either via password
// generation or by explicitly setting the password.
func (c *ClusterManagerClient) SetMasterAuth(ctx context.Context, req *containerpb.SetMasterAuthRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.SetMasterAuth[0:len(c.CallOptions.SetMasterAuth):len(c.CallOptions.SetMasterAuth)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetMasterAuth(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteCluster deletes the cluster, including the Kubernetes endpoint and all worker
// nodes.
//
// Firewalls and routes that were configured during cluster creation
// are also deleted.
//
// Other Google Compute Engine resources that might be in use by the cluster
// (e.g. load balancer resources) will not be deleted if they weren't present
// at the initial create time.
func (c *ClusterManagerClient) DeleteCluster(ctx context.Context, req *containerpb.DeleteClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.DeleteCluster[0:len(c.CallOptions.DeleteCluster):len(c.CallOptions.DeleteCluster)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.DeleteCluster(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListOperations lists all operations in a project in a specific zone or all zones.
func (c *ClusterManagerClient) ListOperations(ctx context.Context, req *containerpb.ListOperationsRequest, opts ...gax.CallOption) (*containerpb.ListOperationsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...)
	var resp *containerpb.ListOperationsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.ListOperations(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// GetOperation gets the specified operation.
func (c *ClusterManagerClient) GetOperation(ctx context.Context, req *containerpb.GetOperationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.GetOperation(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// CancelOperation cancels the specified operation.
func (c *ClusterManagerClient) CancelOperation(ctx context.Context, req *containerpb.CancelOperationRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// The RPC returns an empty response; only the error matters here.
		_, err = c.clusterManagerClient.CancelOperation(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// GetServerConfig returns configuration info about the Container Engine service.
func (c *ClusterManagerClient) GetServerConfig(ctx context.Context, req *containerpb.GetServerConfigRequest, opts ...gax.CallOption) (*containerpb.ServerConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.GetServerConfig[0:len(c.CallOptions.GetServerConfig):len(c.CallOptions.GetServerConfig)], opts...)
	var resp *containerpb.ServerConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.GetServerConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListNodePools lists the node pools for a cluster.
func (c *ClusterManagerClient) ListNodePools(ctx context.Context, req *containerpb.ListNodePoolsRequest, opts ...gax.CallOption) (*containerpb.ListNodePoolsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.ListNodePools[0:len(c.CallOptions.ListNodePools):len(c.CallOptions.ListNodePools)], opts...)
	var resp *containerpb.ListNodePoolsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.ListNodePools(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// GetNodePool retrieves the node pool requested.
func (c *ClusterManagerClient) GetNodePool(ctx context.Context, req *containerpb.GetNodePoolRequest, opts ...gax.CallOption) (*containerpb.NodePool, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.GetNodePool[0:len(c.CallOptions.GetNodePool):len(c.CallOptions.GetNodePool)], opts...)
	var resp *containerpb.NodePool
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.GetNodePool(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// CreateNodePool creates a node pool for a cluster.
func (c *ClusterManagerClient) CreateNodePool(ctx context.Context, req *containerpb.CreateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.CreateNodePool[0:len(c.CallOptions.CreateNodePool):len(c.CallOptions.CreateNodePool)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.CreateNodePool(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteNodePool deletes a node pool from a cluster.
func (c *ClusterManagerClient) DeleteNodePool(ctx context.Context, req *containerpb.DeleteNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.DeleteNodePool[0:len(c.CallOptions.DeleteNodePool):len(c.CallOptions.DeleteNodePool)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.DeleteNodePool(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// RollbackNodePoolUpgrade rolls back the previously Aborted or Failed NodePool upgrade.
// This will be a no-op if the last upgrade successfully completed.
func (c *ClusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, req *containerpb.RollbackNodePoolUpgradeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.RollbackNodePoolUpgrade[0:len(c.CallOptions.RollbackNodePoolUpgrade):len(c.CallOptions.RollbackNodePoolUpgrade)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.RollbackNodePoolUpgrade(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// SetNodePoolManagement sets the NodeManagement options for a node pool.
func (c *ClusterManagerClient) SetNodePoolManagement(ctx context.Context, req *containerpb.SetNodePoolManagementRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.SetNodePoolManagement[0:len(c.CallOptions.SetNodePoolManagement):len(c.CallOptions.SetNodePoolManagement)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetNodePoolManagement(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// SetLabels sets labels on a cluster.
func (c *ClusterManagerClient) SetLabels(ctx context.Context, req *containerpb.SetLabelsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.SetLabels[0:len(c.CallOptions.SetLabels):len(c.CallOptions.SetLabels)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetLabels(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// SetLegacyAbac enables or disables the ABAC authorization mechanism on a cluster.
func (c *ClusterManagerClient) SetLegacyAbac(ctx context.Context, req *containerpb.SetLegacyAbacRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.SetLegacyAbac[0:len(c.CallOptions.SetLegacyAbac):len(c.CallOptions.SetLegacyAbac)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetLegacyAbac(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// StartIPRotation starts master IP rotation.
func (c *ClusterManagerClient) StartIPRotation(ctx context.Context, req *containerpb.StartIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.StartIPRotation[0:len(c.CallOptions.StartIPRotation):len(c.CallOptions.StartIPRotation)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.StartIPRotation(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// CompleteIPRotation completes master IP rotation.
func (c *ClusterManagerClient) CompleteIPRotation(ctx context.Context, req *containerpb.CompleteIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.CompleteIPRotation[0:len(c.CallOptions.CompleteIPRotation):len(c.CallOptions.CompleteIPRotation)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.CompleteIPRotation(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// SetNodePoolSize sets the size of a specific node pool.
func (c *ClusterManagerClient) SetNodePoolSize(ctx context.Context, req *containerpb.SetNodePoolSizeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.SetNodePoolSize[0:len(c.CallOptions.SetNodePoolSize):len(c.CallOptions.SetNodePoolSize)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetNodePoolSize(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// SetNetworkPolicy enables/disables Network Policy for a cluster.
func (c *ClusterManagerClient) SetNetworkPolicy(ctx context.Context, req *containerpb.SetNetworkPolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.SetNetworkPolicy[0:len(c.CallOptions.SetNetworkPolicy):len(c.CallOptions.SetNetworkPolicy)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetNetworkPolicy(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// SetMaintenancePolicy sets the maintenance policy for a cluster.
func (c *ClusterManagerClient) SetMaintenancePolicy(ctx context.Context, req *containerpb.SetMaintenancePolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Three-index slice pins capacity to length so appending per-call opts
	// copies instead of mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.SetMaintenancePolicy[0:len(c.CallOptions.SetMaintenancePolicy):len(c.CallOptions.SetMaintenancePolicy)], opts...)
	var resp *containerpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetMaintenancePolicy(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

View File

@ -0,0 +1,571 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package container_test
import (
"cloud.google.com/go/container/apiv1"
"golang.org/x/net/context"
containerpb "google.golang.org/genproto/googleapis/container/v1"
)
// ExampleNewClusterManagerClient demonstrates constructing a client.
func ExampleNewClusterManagerClient() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleClusterManagerClient_ListClusters demonstrates a minimal ListClusters call.
func ExampleClusterManagerClient_ListClusters() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.ListClustersRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListClusters(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_GetCluster demonstrates a minimal GetCluster call.
func ExampleClusterManagerClient_GetCluster() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.GetClusterRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_CreateCluster demonstrates a minimal CreateCluster call.
func ExampleClusterManagerClient_CreateCluster() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.CreateClusterRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_UpdateCluster demonstrates a minimal UpdateCluster call.
func ExampleClusterManagerClient_UpdateCluster() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.UpdateClusterRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_UpdateNodePool demonstrates a minimal UpdateNodePool call.
func ExampleClusterManagerClient_UpdateNodePool() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.UpdateNodePoolRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateNodePool(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetNodePoolAutoscaling demonstrates a minimal SetNodePoolAutoscaling call.
func ExampleClusterManagerClient_SetNodePoolAutoscaling() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetNodePoolAutoscalingRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetNodePoolAutoscaling(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetLoggingService demonstrates a minimal SetLoggingService call.
func ExampleClusterManagerClient_SetLoggingService() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetLoggingServiceRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetLoggingService(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetMonitoringService demonstrates a minimal SetMonitoringService call.
func ExampleClusterManagerClient_SetMonitoringService() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetMonitoringServiceRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetMonitoringService(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetAddonsConfig demonstrates a minimal SetAddonsConfig call.
func ExampleClusterManagerClient_SetAddonsConfig() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetAddonsConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetAddonsConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetLocations demonstrates a minimal SetLocations call.
func ExampleClusterManagerClient_SetLocations() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetLocationsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetLocations(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_UpdateMaster demonstrates a minimal UpdateMaster call.
func ExampleClusterManagerClient_UpdateMaster() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.UpdateMasterRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateMaster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetMasterAuth demonstrates a minimal SetMasterAuth call.
func ExampleClusterManagerClient_SetMasterAuth() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetMasterAuthRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetMasterAuth(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_DeleteCluster demonstrates a minimal DeleteCluster call.
func ExampleClusterManagerClient_DeleteCluster() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.DeleteClusterRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.DeleteCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_ListOperations demonstrates a minimal ListOperations call.
func ExampleClusterManagerClient_ListOperations() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.ListOperationsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListOperations(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_GetOperation demonstrates a minimal GetOperation call.
func ExampleClusterManagerClient_GetOperation() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.GetOperationRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetOperation(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_CancelOperation demonstrates a minimal CancelOperation call.
// CancelOperation returns only an error, so there is no resp to consume.
func ExampleClusterManagerClient_CancelOperation() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.CancelOperationRequest{
		// TODO: Fill request struct fields.
	}
	err = c.CancelOperation(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
// ExampleClusterManagerClient_GetServerConfig demonstrates a minimal GetServerConfig call.
func ExampleClusterManagerClient_GetServerConfig() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.GetServerConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetServerConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_ListNodePools demonstrates a minimal ListNodePools call.
func ExampleClusterManagerClient_ListNodePools() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.ListNodePoolsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListNodePools(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_GetNodePool demonstrates a minimal GetNodePool call.
func ExampleClusterManagerClient_GetNodePool() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.GetNodePoolRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetNodePool(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_CreateNodePool demonstrates a minimal CreateNodePool call.
func ExampleClusterManagerClient_CreateNodePool() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.CreateNodePoolRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateNodePool(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_DeleteNodePool demonstrates a minimal DeleteNodePool call.
func ExampleClusterManagerClient_DeleteNodePool() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.DeleteNodePoolRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.DeleteNodePool(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_RollbackNodePoolUpgrade demonstrates a minimal RollbackNodePoolUpgrade call.
func ExampleClusterManagerClient_RollbackNodePoolUpgrade() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.RollbackNodePoolUpgradeRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.RollbackNodePoolUpgrade(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetNodePoolManagement demonstrates a minimal SetNodePoolManagement call.
func ExampleClusterManagerClient_SetNodePoolManagement() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetNodePoolManagementRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetNodePoolManagement(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetLabels demonstrates a minimal SetLabels call.
func ExampleClusterManagerClient_SetLabels() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetLabelsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetLabels(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetLegacyAbac demonstrates a minimal SetLegacyAbac call.
func ExampleClusterManagerClient_SetLegacyAbac() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetLegacyAbacRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetLegacyAbac(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_StartIPRotation demonstrates a minimal StartIPRotation call.
func ExampleClusterManagerClient_StartIPRotation() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.StartIPRotationRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.StartIPRotation(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_CompleteIPRotation demonstrates a minimal CompleteIPRotation call.
func ExampleClusterManagerClient_CompleteIPRotation() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.CompleteIPRotationRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CompleteIPRotation(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetNodePoolSize demonstrates a minimal SetNodePoolSize call.
func ExampleClusterManagerClient_SetNodePoolSize() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetNodePoolSizeRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetNodePoolSize(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetNetworkPolicy demonstrates a minimal SetNetworkPolicy call.
func ExampleClusterManagerClient_SetNetworkPolicy() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetNetworkPolicyRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetNetworkPolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterManagerClient_SetMaintenancePolicy demonstrates a minimal SetMaintenancePolicy call.
func ExampleClusterManagerClient_SetMaintenancePolicy() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &containerpb.SetMaintenancePolicyRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetMaintenancePolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

48
vendor/cloud.google.com/go/container/apiv1/doc.go generated vendored Normal file
View File

@ -0,0 +1,48 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package container is an auto-generated package for the
// Google Container Engine API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// The Google Kubernetes Engine API is used for building and managing
// container
// based applications, powered by the open source Kubernetes technology.
package container // import "cloud.google.com/go/container/apiv1"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
// insertMetadata returns a context whose outgoing gRPC metadata is the
// existing outgoing metadata merged with every MD in mds. Values for a key
// present in both are concatenated; the input context's metadata is never
// mutated (we operate on a copy).
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	existing, _ := metadata.FromOutgoingContext(ctx)
	merged := existing.Copy()
	for _, extra := range mds {
		for key, vals := range extra {
			merged[key] = append(merged[key], vals...)
		}
	}
	return metadata.NewOutgoingContext(ctx, merged)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	// A fresh slice is returned on every call, so callers may modify it freely.
	scopes := []string{"https://www.googleapis.com/auth/cloud-platform"}
	return scopes
}

2912
vendor/cloud.google.com/go/container/apiv1/mock_test.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -14,7 +14,7 @@
// Package container contains a deprecated Google Container Engine client.
//
// Deprecated: Use google.golang.org/api/container instead.
// Deprecated: Use cloud.google.com/go/container/apiv1 instead.
package container // import "cloud.google.com/go/container"
import (

View File

@ -0,0 +1,69 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dataproc
import (
dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)
import (
"fmt"
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
// TestClusterControllerSmoke performs a single live ListClusters round trip.
// It is skipped in -short mode and when no integration-test token source is
// available in the environment.
func TestClusterControllerSmoke(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping smoke test in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}
	projectID := testutil.ProjID()
	client, err := NewClusterControllerClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		t.Fatal(err)
	}
	req := &dataprocpb.ListClustersRequest{
		ProjectId: projectID,
		Region:    "global",
	}
	// One Next call is enough to prove the request/response path works;
	// iterator.Done simply means the project has no clusters.
	if _, err := client.ListClusters(ctx, req).Next(); err != nil && err != iterator.Done {
		t.Error(err)
	}
}

View File

@ -0,0 +1,593 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dataproc
import (
"math"
"time"
"cloud.google.com/go/internal/version"
"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// ClusterControllerCallOptions contains the retry settings for each method of ClusterControllerClient.
type ClusterControllerCallOptions struct {
	// One slice of gax call options per RPC; the client methods prepend
	// these defaults before any per-call options supplied by the caller.
	CreateCluster   []gax.CallOption
	UpdateCluster   []gax.CallOption
	DeleteCluster   []gax.CallOption
	GetCluster      []gax.CallOption
	ListClusters    []gax.CallOption
	DiagnoseCluster []gax.CallOption
}
// defaultClusterControllerClientOptions returns the base client options:
// the production Dataproc endpoint and the package's default auth scopes.
// Caller-supplied options are appended after these and so take precedence.
func defaultClusterControllerClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("dataproc.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}
// defaultClusterControllerCallOptions builds the per-method retry settings.
// Only the {"default", "idempotent"} key exists in the map, so lookups with
// "non_idempotent" intentionally yield nil — those methods get no retry.
func defaultClusterControllerCallOptions() *ClusterControllerCallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				// Retry only on transient codes, with exponential backoff
				// from 100ms up to a 60s cap.
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &ClusterControllerCallOptions{
		CreateCluster:   retry[[2]string{"default", "non_idempotent"}],
		UpdateCluster:   retry[[2]string{"default", "non_idempotent"}],
		DeleteCluster:   retry[[2]string{"default", "idempotent"}],
		GetCluster:      retry[[2]string{"default", "idempotent"}],
		ListClusters:    retry[[2]string{"default", "idempotent"}],
		DiagnoseCluster: retry[[2]string{"default", "non_idempotent"}],
	}
}
// ClusterControllerClient is a client for interacting with Google Cloud Dataproc API.
type ClusterControllerClient struct {
	// The connection to the service.
	conn *grpc.ClientConn
	// The gRPC API client.
	clusterControllerClient dataprocpb.ClusterControllerClient
	// LROClient is used internally to handle longrunning operations.
	// It is exposed so that its CallOptions can be modified if required.
	// Users should not Close this client.
	LROClient *lroauto.OperationsClient
	// The call options for this service.
	CallOptions *ClusterControllerCallOptions
	// The x-goog-* metadata to be sent with each request.
	// Populated by setGoogleClientInfo during construction.
	xGoogMetadata metadata.MD
}
// NewClusterControllerClient creates a new cluster controller client.
//
// The ClusterControllerService provides methods to manage clusters
// of Google Compute Engine instances.
func NewClusterControllerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterControllerClient, error) {
	// Caller options are appended last so they override the defaults.
	conn, err := transport.DialGRPC(ctx, append(defaultClusterControllerClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &ClusterControllerClient{
		conn:        conn,
		CallOptions: defaultClusterControllerCallOptions(),
		clusterControllerClient: dataprocpb.NewClusterControllerClient(conn),
	}
	c.setGoogleClientInfo()
	// The LRO client reuses the same connection via WithGRPCConn.
	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
	if err != nil {
		// This error "should not happen", since we are just reusing old connection
		// and never actually need to dial.
		// If this does happen, we could leak conn. However, we cannot close conn:
		// If the user invoked the function with option.WithGRPCConn,
		// we would close a connection that's still in use.
		// TODO(pongad): investigate error conditions.
		return nil, err
	}
	return c, nil
}
// Connection returns the client's connection to the API service.
// The same connection is shared with the embedded LRO client.
func (c *ClusterControllerClient) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
// Note: this also terminates the connection used by LROClient.
func (c *ClusterControllerClient) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *ClusterControllerClient) setGoogleClientInfo(keyval ...string) {
	// Caller-provided key/value pairs are placed between the Go version
	// and the fixed gapic/gax/grpc version entries.
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// CreateCluster creates a cluster in a project.
// The returned CreateClusterOperation wraps a server-side long-running
// operation; use its Wait/Poll methods to obtain the resulting Cluster.
func (c *ClusterControllerClient) CreateCluster(ctx context.Context, req *dataprocpb.CreateClusterRequest, opts ...gax.CallOption) (*CreateClusterOperation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so append allocates a fresh
	// backing array instead of mutating the shared default options.
	opts = append(c.CallOptions.CreateCluster[0:len(c.CallOptions.CreateCluster):len(c.CallOptions.CreateCluster)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterControllerClient.CreateCluster(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return &CreateClusterOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
// UpdateCluster updates a cluster in a project.
// The returned UpdateClusterOperation wraps a server-side long-running
// operation; use its Wait/Poll methods to obtain the resulting Cluster.
func (c *ClusterControllerClient) UpdateCluster(ctx context.Context, req *dataprocpb.UpdateClusterRequest, opts ...gax.CallOption) (*UpdateClusterOperation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression prevents append from clobbering the defaults.
	opts = append(c.CallOptions.UpdateCluster[0:len(c.CallOptions.UpdateCluster):len(c.CallOptions.UpdateCluster)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterControllerClient.UpdateCluster(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return &UpdateClusterOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
// DeleteCluster deletes a cluster in a project.
// The returned DeleteClusterOperation wraps a server-side long-running
// operation; its Wait/Poll methods return only an error (no response body).
func (c *ClusterControllerClient) DeleteCluster(ctx context.Context, req *dataprocpb.DeleteClusterRequest, opts ...gax.CallOption) (*DeleteClusterOperation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression prevents append from clobbering the defaults.
	opts = append(c.CallOptions.DeleteCluster[0:len(c.CallOptions.DeleteCluster):len(c.CallOptions.DeleteCluster)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterControllerClient.DeleteCluster(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return &DeleteClusterOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
// GetCluster gets the resource representation for a cluster in a project.
// Unlike the mutating RPCs this is a unary call: it returns the Cluster
// directly rather than a long-running operation.
func (c *ClusterControllerClient) GetCluster(ctx context.Context, req *dataprocpb.GetClusterRequest, opts ...gax.CallOption) (*dataprocpb.Cluster, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression prevents append from clobbering the defaults.
	opts = append(c.CallOptions.GetCluster[0:len(c.CallOptions.GetCluster):len(c.CallOptions.GetCluster)], opts...)
	var resp *dataprocpb.Cluster
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterControllerClient.GetCluster(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListClusters lists all regions/{region}/clusters in a project.
// The returned iterator fetches pages lazily; req is mutated across pages
// (PageToken/PageSize), so do not reuse req concurrently.
func (c *ClusterControllerClient) ListClusters(ctx context.Context, req *dataprocpb.ListClustersRequest, opts ...gax.CallOption) *ClusterIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression prevents append from clobbering the defaults.
	opts = append(c.CallOptions.ListClusters[0:len(c.CallOptions.ListClusters):len(c.CallOptions.ListClusters)], opts...)
	it := &ClusterIterator{}
	// InternalFetch retrieves exactly one page per invocation.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dataprocpb.Cluster, string, error) {
		var resp *dataprocpb.ListClustersResponse
		req.PageToken = pageToken
		// Clamp: the proto field is int32 but the iterator API uses int.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.clusterControllerClient.ListClusters(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Clusters, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator.PageInfo contract by
	// buffering items on the iterator.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// DiagnoseCluster gets cluster diagnostic information.
// After the operation completes, the Operation.response field
// contains DiagnoseClusterOutputLocation.
func (c *ClusterControllerClient) DiagnoseCluster(ctx context.Context, req *dataprocpb.DiagnoseClusterRequest, opts ...gax.CallOption) (*DiagnoseClusterOperation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression prevents append from clobbering the defaults.
	opts = append(c.CallOptions.DiagnoseCluster[0:len(c.CallOptions.DiagnoseCluster):len(c.CallOptions.DiagnoseCluster)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterControllerClient.DiagnoseCluster(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return &DiagnoseClusterOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
// ClusterIterator manages a stream of *dataprocpb.Cluster.
type ClusterIterator struct {
	// items buffers the current page; Next pops from the front.
	items    []*dataprocpb.Cluster
	pageInfo *iterator.PageInfo
	// nextFunc refills items via the page-fetch machinery when needed.
	nextFunc func() error
	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dataprocpb.Cluster, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *ClusterIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *ClusterIterator) Next() (*dataprocpb.Cluster, error) {
	var item *dataprocpb.Cluster
	// nextFunc refills it.items when the buffer is empty; it returns
	// iterator.Done once the final page has been consumed.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports the number of buffered, not-yet-returned items.
func (it *ClusterIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered items to the pagination machinery and
// resets the buffer.
func (it *ClusterIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// CreateClusterOperation manages a long-running operation from CreateCluster.
type CreateClusterOperation struct {
	lro *longrunning.Operation
}

// CreateClusterOperation returns a new CreateClusterOperation from a given name.
// The name must be that of a previously created CreateClusterOperation, possibly from a different process.
// Note: the name is not validated here; errors surface on the first Poll/Wait.
func (c *ClusterControllerClient) CreateClusterOperation(name string) *CreateClusterOperation {
	return &CreateClusterOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
// It polls the server at a fixed 10-second interval.
//
// See documentation of Poll for error-handling information.
func (op *CreateClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) {
	var resp dataprocpb.Cluster
	if err := op.lro.WaitWithInterval(ctx, &resp, 10000*time.Millisecond, opts...); err != nil {
		return nil, err
	}
	return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *CreateClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) {
	var resp dataprocpb.Cluster
	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
		return nil, err
	}
	if !op.Done() {
		return nil, nil
	}
	return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *CreateClusterOperation) Metadata() (*dataprocpb.ClusterOperationMetadata, error) {
	var meta dataprocpb.ClusterOperationMetadata
	// ErrNoMetadata is deliberately mapped to (nil, nil), not an error.
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *CreateClusterOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *CreateClusterOperation) Name() string {
	return op.lro.Name()
}

// Delete deletes a long-running operation.
// This method indicates that the client is no longer interested in the operation result.
// It does not cancel the operation.
func (op *CreateClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.Delete(ctx, opts...)
}
// DeleteClusterOperation manages a long-running operation from DeleteCluster.
type DeleteClusterOperation struct {
	lro *longrunning.Operation
}

// DeleteClusterOperation returns a new DeleteClusterOperation from a given name.
// The name must be that of a previously created DeleteClusterOperation, possibly from a different process.
// Note: the name is not validated here; errors surface on the first Poll/Wait.
func (c *ClusterControllerClient) DeleteClusterOperation(name string) *DeleteClusterOperation {
	return &DeleteClusterOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning any error encountered.
// Deletion has no response body, so Wait passes nil and polls every 10 seconds.
//
// See documentation of Poll for error-handling information.
func (op *DeleteClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.WaitWithInterval(ctx, nil, 10000*time.Millisecond, opts...)
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
func (op *DeleteClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.Poll(ctx, nil, opts...)
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *DeleteClusterOperation) Metadata() (*dataprocpb.ClusterOperationMetadata, error) {
	var meta dataprocpb.ClusterOperationMetadata
	// ErrNoMetadata is deliberately mapped to (nil, nil), not an error.
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *DeleteClusterOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *DeleteClusterOperation) Name() string {
	return op.lro.Name()
}

// Delete deletes a long-running operation.
// This method indicates that the client is no longer interested in the operation result.
// It does not cancel the operation.
func (op *DeleteClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.Delete(ctx, opts...)
}
// DiagnoseClusterOperation manages a long-running operation from DiagnoseCluster.
type DiagnoseClusterOperation struct {
	lro *longrunning.Operation
}

// DiagnoseClusterOperation returns a new DiagnoseClusterOperation from a given name.
// The name must be that of a previously created DiagnoseClusterOperation, possibly from a different process.
// Note: the name is not validated here; errors surface on the first Poll/Wait.
func (c *ClusterControllerClient) DiagnoseClusterOperation(name string) *DiagnoseClusterOperation {
	return &DiagnoseClusterOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning any error encountered.
// The diagnostic result is exposed via Metadata, so Wait passes nil and polls every 10 seconds.
//
// See documentation of Poll for error-handling information.
func (op *DiagnoseClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.WaitWithInterval(ctx, nil, 10000*time.Millisecond, opts...)
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
func (op *DiagnoseClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.Poll(ctx, nil, opts...)
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *DiagnoseClusterOperation) Metadata() (*dataprocpb.DiagnoseClusterResults, error) {
	var meta dataprocpb.DiagnoseClusterResults
	// ErrNoMetadata is deliberately mapped to (nil, nil), not an error.
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *DiagnoseClusterOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *DiagnoseClusterOperation) Name() string {
	return op.lro.Name()
}

// Delete deletes a long-running operation.
// This method indicates that the client is no longer interested in the operation result.
// It does not cancel the operation.
func (op *DiagnoseClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.Delete(ctx, opts...)
}
// UpdateClusterOperation manages a long-running operation from UpdateCluster.
type UpdateClusterOperation struct {
	lro *longrunning.Operation
}

// UpdateClusterOperation returns a new UpdateClusterOperation from a given name.
// The name must be that of a previously created UpdateClusterOperation, possibly from a different process.
// Note: the name is not validated here; errors surface on the first Poll/Wait.
func (c *ClusterControllerClient) UpdateClusterOperation(name string) *UpdateClusterOperation {
	return &UpdateClusterOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
// It polls the server at a fixed 10-second interval.
//
// See documentation of Poll for error-handling information.
func (op *UpdateClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) {
	var resp dataprocpb.Cluster
	if err := op.lro.WaitWithInterval(ctx, &resp, 10000*time.Millisecond, opts...); err != nil {
		return nil, err
	}
	return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *UpdateClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) {
	var resp dataprocpb.Cluster
	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
		return nil, err
	}
	if !op.Done() {
		return nil, nil
	}
	return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *UpdateClusterOperation) Metadata() (*dataprocpb.ClusterOperationMetadata, error) {
	var meta dataprocpb.ClusterOperationMetadata
	// ErrNoMetadata is deliberately mapped to (nil, nil), not an error.
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *UpdateClusterOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *UpdateClusterOperation) Name() string {
	return op.lro.Name()
}

// Delete deletes a long-running operation.
// This method indicates that the client is no longer interested in the operation result.
// It does not cancel the operation.
func (op *UpdateClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.Delete(ctx, opts...)
}

View File

@ -0,0 +1,160 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dataproc_test
import (
"cloud.google.com/go/dataproc/apiv1"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)
// ExampleNewClusterControllerClient demonstrates constructing a client.
func ExampleNewClusterControllerClient() {
	ctx := context.Background()
	c, err := dataproc.NewClusterControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleClusterControllerClient_CreateCluster demonstrates starting a
// cluster-creation operation and blocking on its completion with Wait.
func ExampleClusterControllerClient_CreateCluster() {
	ctx := context.Background()
	c, err := dataproc.NewClusterControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.CreateClusterRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.CreateCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	resp, err := op.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterControllerClient_UpdateCluster demonstrates starting a
// cluster-update operation and blocking on its completion with Wait.
func ExampleClusterControllerClient_UpdateCluster() {
	ctx := context.Background()
	c, err := dataproc.NewClusterControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.UpdateClusterRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.UpdateCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	resp, err := op.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterControllerClient_DeleteCluster demonstrates deleting a
// cluster; the operation's Wait returns only an error.
func ExampleClusterControllerClient_DeleteCluster() {
	ctx := context.Background()
	c, err := dataproc.NewClusterControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.DeleteClusterRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.DeleteCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	err = op.Wait(ctx)
	// TODO: Handle error.
}
// ExampleClusterControllerClient_GetCluster demonstrates the unary
// GetCluster call.
func ExampleClusterControllerClient_GetCluster() {
	ctx := context.Background()
	c, err := dataproc.NewClusterControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.GetClusterRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClusterControllerClient_ListClusters demonstrates draining the
// cluster iterator until iterator.Done.
func ExampleClusterControllerClient_ListClusters() {
	ctx := context.Background()
	c, err := dataproc.NewClusterControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.ListClustersRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListClusters(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClusterControllerClient_DiagnoseCluster demonstrates requesting
// cluster diagnostics; the operation's Wait returns only an error.
func ExampleClusterControllerClient_DiagnoseCluster() {
	ctx := context.Background()
	c, err := dataproc.NewClusterControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.DiagnoseClusterRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.DiagnoseCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	err = op.Wait(ctx)
	// TODO: Handle error.
}

46
vendor/cloud.google.com/go/dataproc/apiv1/doc.go generated vendored Normal file
View File

@ -0,0 +1,46 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package dataproc is an auto-generated package for the
// Google Cloud Dataproc API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Manages Hadoop-based clusters and jobs on Google Cloud Platform.
package dataproc // import "cloud.google.com/go/dataproc/apiv1"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
// insertMetadata returns a copy of ctx whose outgoing gRPC metadata is the
// union of the metadata already attached to ctx and every MD in mds; values
// for a key present in several MDs are concatenated in order.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	merged, _ := metadata.FromOutgoingContext(ctx)
	merged = merged.Copy() // never mutate the MD owned by ctx
	for _, extra := range mds {
		for key, vals := range extra {
			merged[key] = append(merged[key], vals...)
		}
	}
	return metadata.NewOutgoingContext(ctx, merged)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	scopes := make([]string, 0, 1)
	scopes = append(scopes, "https://www.googleapis.com/auth/cloud-platform")
	return scopes
}

View File

@ -0,0 +1,285 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dataproc
import (
"math"
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// JobControllerCallOptions contains the retry settings for each method of JobControllerClient.
type JobControllerCallOptions struct {
	// One slice of gax call options per RPC; the client methods prepend
	// these defaults before any per-call options supplied by the caller.
	SubmitJob []gax.CallOption
	GetJob    []gax.CallOption
	ListJobs  []gax.CallOption
	UpdateJob []gax.CallOption
	CancelJob []gax.CallOption
	DeleteJob []gax.CallOption
}
// defaultJobControllerClientOptions returns the base client options:
// the production Dataproc endpoint and the package's default auth scopes.
// Caller-supplied options are appended after these and so take precedence.
func defaultJobControllerClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("dataproc.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}
// defaultJobControllerCallOptions builds the per-method retry settings.
// Only the {"default", "idempotent"} key exists in the map, so lookups with
// "non_idempotent" intentionally yield nil — those methods get no retry.
func defaultJobControllerCallOptions() *JobControllerCallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				// Retry only on transient codes, with exponential backoff
				// from 100ms up to a 60s cap.
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &JobControllerCallOptions{
		SubmitJob: retry[[2]string{"default", "non_idempotent"}],
		GetJob:    retry[[2]string{"default", "idempotent"}],
		ListJobs:  retry[[2]string{"default", "idempotent"}],
		UpdateJob: retry[[2]string{"default", "non_idempotent"}],
		CancelJob: retry[[2]string{"default", "non_idempotent"}],
		DeleteJob: retry[[2]string{"default", "idempotent"}],
	}
}
// JobControllerClient is a client for interacting with Google Cloud Dataproc API.
type JobControllerClient struct {
	// The connection to the service.
	conn *grpc.ClientConn
	// The gRPC API client.
	jobControllerClient dataprocpb.JobControllerClient
	// The call options for this service.
	CallOptions *JobControllerCallOptions
	// The x-goog-* metadata to be sent with each request.
	// Populated by setGoogleClientInfo during construction.
	xGoogMetadata metadata.MD
}
// NewJobControllerClient creates a new job controller client.
//
// The JobController provides methods to manage jobs.
func NewJobControllerClient(ctx context.Context, opts ...option.ClientOption) (*JobControllerClient, error) {
	// Caller options are appended last so they override the defaults.
	conn, err := transport.DialGRPC(ctx, append(defaultJobControllerClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &JobControllerClient{
		conn:        conn,
		CallOptions: defaultJobControllerCallOptions(),
		jobControllerClient: dataprocpb.NewJobControllerClient(conn),
	}
	c.setGoogleClientInfo()
	return c, nil
}
// Connection returns the client's connection to the API service.
// The returned *grpc.ClientConn is the live connection; closing it
// invalidates this client.
func (c *JobControllerClient) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required. Subsequent RPCs through this client
// will fail once the underlying connection is closed.
func (c *JobControllerClient) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *JobControllerClient) setGoogleClientInfo(keyval ...string) {
	pairs := []string{"gl-go", version.Go()}
	pairs = append(pairs, keyval...)
	pairs = append(pairs, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(pairs...))
}
// SubmitJob submits a job to a cluster.
func (c *JobControllerClient) SubmitJob(ctx context.Context, req *dataprocpb.SubmitJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// The three-index slice pins capacity so appending caller options
	// cannot overwrite the shared default option slice.
	defaults := c.CallOptions.SubmitJob
	opts = append(defaults[0:len(defaults):len(defaults)], opts...)
	var job *dataprocpb.Job
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		job, err = c.jobControllerClient.SubmitJob(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, opts...); err != nil {
		return nil, err
	}
	return job, nil
}
// GetJob gets the resource representation for a job in a project.
func (c *JobControllerClient) GetJob(ctx context.Context, req *dataprocpb.GetJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Pin capacity with a full-slice expression so the shared default
	// option slice stays untouched when caller opts are appended.
	defaults := c.CallOptions.GetJob
	opts = append(defaults[0:len(defaults):len(defaults)], opts...)
	var job *dataprocpb.Job
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		job, err = c.jobControllerClient.GetJob(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, opts...); err != nil {
		return nil, err
	}
	return job, nil
}
// ListJobs lists regions/{region}/jobs in a project.
//
// Results are returned lazily through the returned JobIterator; each
// InternalFetch invocation issues a single ListJobs RPC for one page.
// Note the closures capture ctx, req and opts, so the request message is
// mutated (PageToken/PageSize) on every page fetch.
func (c *JobControllerClient) ListJobs(ctx context.Context, req *dataprocpb.ListJobsRequest, opts ...gax.CallOption) *JobIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression pins capacity so appending caller options
	// cannot clobber the shared default option slice.
	opts = append(c.CallOptions.ListJobs[0:len(c.CallOptions.ListJobs):len(c.CallOptions.ListJobs)], opts...)
	it := &JobIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dataprocpb.Job, string, error) {
		var resp *dataprocpb.ListJobsResponse
		req.PageToken = pageToken
		// Clamp: the iterator API uses int but the proto field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.jobControllerClient.ListJobs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Jobs, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator.NewPageInfo contract:
	// it buffers results on the iterator and returns only the next token.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// UpdateJob updates a job in a project.
func (c *JobControllerClient) UpdateJob(ctx context.Context, req *dataprocpb.UpdateJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) {
	// Attach the x-goog-api-client metadata to the outgoing request.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression pins capacity so appending caller options
	// cannot clobber the shared default option slice.
	opts = append(c.CallOptions.UpdateJob[0:len(c.CallOptions.UpdateJob):len(c.CallOptions.UpdateJob)], opts...)
	var resp *dataprocpb.Job
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.jobControllerClient.UpdateJob(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// CancelJob starts a job cancellation request. To access the job resource
// after cancellation, call
// regions/{region}/jobs.list (at /dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or
// regions/{region}/jobs.get (at /dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
func (c *JobControllerClient) CancelJob(ctx context.Context, req *dataprocpb.CancelJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) {
	// Attach the x-goog-api-client metadata to the outgoing request.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression pins capacity so appending caller options
	// cannot clobber the shared default option slice.
	opts = append(c.CallOptions.CancelJob[0:len(c.CallOptions.CancelJob):len(c.CallOptions.CancelJob)], opts...)
	var resp *dataprocpb.Job
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.jobControllerClient.CancelJob(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteJob deletes the job from the project. If the job is active, the delete fails,
// and the response returns FAILED_PRECONDITION.
func (c *JobControllerClient) DeleteJob(ctx context.Context, req *dataprocpb.DeleteJobRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Pin capacity so appending caller options cannot overwrite the
	// shared default option slice.
	defaults := c.CallOptions.DeleteJob
	opts = append(defaults[0:len(defaults):len(defaults)], opts...)
	call := func(ctx context.Context, settings gax.CallSettings) error {
		_, err := c.jobControllerClient.DeleteJob(ctx, req, settings.GRPC...)
		return err
	}
	return gax.Invoke(ctx, call, opts...)
}
// JobIterator manages a stream of *dataprocpb.Job.
// It buffers one fetched page of results in items and refills via nextFunc.
type JobIterator struct {
	// items holds the not-yet-consumed results of the current page.
	items    []*dataprocpb.Job
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dataprocpb.Job, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *JobIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *JobIterator) Next() (*dataprocpb.Job, error) {
	// nextFunc refills it.items from the service when the buffer is empty.
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	head := it.items[0]
	it.items = it.items[1:]
	return head, nil
}
// bufLen reports how many fetched results are currently buffered.
func (it *JobIterator) bufLen() int {
	return len(it.items)
}
// takeBuf hands the buffered results to the iterator machinery and
// clears the internal buffer.
func (it *JobIterator) takeBuf() interface{} {
	buffered := it.items
	it.items = nil
	return buffered
}

View File

@ -0,0 +1,146 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dataproc_test
import (
"cloud.google.com/go/dataproc/apiv1"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)
// ExampleNewJobControllerClient demonstrates constructing a JobController client.
func ExampleNewJobControllerClient() {
	ctx := context.Background()
	c, err := dataproc.NewJobControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleJobControllerClient_SubmitJob demonstrates submitting a job to a cluster.
func ExampleJobControllerClient_SubmitJob() {
	ctx := context.Background()
	c, err := dataproc.NewJobControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.SubmitJobRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SubmitJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleJobControllerClient_GetJob demonstrates fetching a single job.
func ExampleJobControllerClient_GetJob() {
	ctx := context.Background()
	c, err := dataproc.NewJobControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.GetJobRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleJobControllerClient_ListJobs demonstrates iterating over a job
// listing until iterator.Done is returned.
func ExampleJobControllerClient_ListJobs() {
	ctx := context.Background()
	c, err := dataproc.NewJobControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.ListJobsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListJobs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleJobControllerClient_UpdateJob demonstrates updating a job in a project.
func ExampleJobControllerClient_UpdateJob() {
	ctx := context.Background()
	c, err := dataproc.NewJobControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.UpdateJobRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleJobControllerClient_CancelJob demonstrates requesting cancellation of a job.
func ExampleJobControllerClient_CancelJob() {
	ctx := context.Background()
	c, err := dataproc.NewJobControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.CancelJobRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CancelJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleJobControllerClient_DeleteJob demonstrates deleting an inactive job.
func ExampleJobControllerClient_DeleteJob() {
	ctx := context.Background()
	c, err := dataproc.NewJobControllerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dataprocpb.DeleteJobRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

1196
vendor/cloud.google.com/go/dataproc/apiv1/mock_test.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

BIN
vendor/cloud.google.com/go/datastore/datastore.replay generated vendored Normal file

Binary file not shown.

View File

@ -24,7 +24,10 @@ import (
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/proto"
"github.com/google/go-cmp/cmp"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
@ -1942,18 +1945,7 @@ func TestRoundTrip(t *testing.T) {
sortPL(*pl)
}
equal := false
switch v := got.(type) {
// Round tripping a time.Time can result in a different time.Location: Local instead of UTC.
// We therefore test equality explicitly, instead of relying on reflect.DeepEqual.
case *T:
equal = v.T.Equal(tc.want.(*T).T)
case *SpecialTime:
equal = v.MyTime.Equal(tc.want.(*SpecialTime).MyTime.Time)
default:
equal = reflect.DeepEqual(got, tc.want)
}
if !equal {
if !testutil.Equal(got, tc.want, cmp.AllowUnexported(X0{}, X2{})) {
t.Errorf("%s: compare:\ngot: %+#v\nwant: %+#v", tc.desc, got, tc.want)
continue
}
@ -2707,7 +2699,7 @@ func TestLoadSavePLS(t *testing.T) {
t.Errorf("%s: save: %v", tc.desc, err)
continue
}
if !reflect.DeepEqual(e, tc.wantSave) {
if !testutil.Equal(e, tc.wantSave) {
t.Errorf("%s: save: \ngot: %+v\nwant: %+v", tc.desc, e, tc.wantSave)
continue
}
@ -2729,7 +2721,7 @@ func TestLoadSavePLS(t *testing.T) {
t.Errorf("%s: load: %v", tc.desc, err)
continue
}
if !reflect.DeepEqual(gota, tc.wantLoad) {
if !testutil.Equal(gota, tc.wantLoad) {
t.Errorf("%s: load: \ngot: %+v\nwant: %+v", tc.desc, gota, tc.wantLoad)
continue
}
@ -2864,7 +2856,7 @@ func TestQueryConstruction(t *testing.T) {
}
continue
}
if !reflect.DeepEqual(test.q, test.exp) {
if !testutil.Equal(test.q, test.exp, cmp.AllowUnexported(Query{})) {
t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp)
}
}
@ -3322,7 +3314,7 @@ func TestKeyLoaderEndToEnd(t *testing.T) {
}
for i := range dst {
if !reflect.DeepEqual(dst[i].K, keys[i]) {
if !testutil.Equal(dst[i].K, keys[i]) {
t.Fatalf("unexpected entity %d to have key %+v, got %+v", i, keys[i], dst[i].K)
}
}

View File

@ -15,8 +15,13 @@
package datastore
import (
"encoding/json"
"errors"
"flag"
"fmt"
"log"
"net"
"os"
"reflect"
"sort"
"strings"
@ -25,25 +30,141 @@ import (
"time"
"cloud.google.com/go/internal/testutil"
"cloud.google.com/go/rpcreplay"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
// TODO(djd): Make test entity clean up more robust: some test entities may
// be left behind if tests are aborted, the transport fails, etc.
var timeNow = time.Now()
// suffix is a timestamp-based suffix which is appended to key names,
// particularly for the root keys of entity groups. This reduces flakiness
// when the tests are run in parallel.
var suffix = fmt.Sprintf("-t%d", time.Now().UnixNano())
var suffix string
func newClient(ctx context.Context, t *testing.T) *Client {
const replayFilename = "datastore.replay"
type replayInfo struct {
ProjectID string
Time time.Time
}
var (
record = flag.Bool("record", false, "record RPCs")
newTestClient = func(ctx context.Context, t *testing.T) *Client {
return newClient(ctx, t, nil)
}
)
// TestMain delegates to testMain so that deferred cleanup (e.g. closing
// the RPC recorder) runs before os.Exit terminates the process; os.Exit
// would skip defers declared in this function.
func TestMain(m *testing.M) {
	os.Exit(testMain(m))
}
// testMain selects one of three integration-test modes based on flags:
//   - short mode: no live RPCs; replay from datastore.replay if it exists,
//     otherwise the tests simply skip;
//   - -record: run against a live project and record all RPCs to the
//     replay file (incompatible with -short);
//   - default: run live without recording.
// It returns the process exit code for os.Exit.
func testMain(m *testing.M) int {
	flag.Parse()
	if testing.Short() {
		if *record {
			log.Fatal("cannot combine -short and -record")
		}
		// Replay only when a recording is present.
		if _, err := os.Stat(replayFilename); err == nil {
			initReplay()
		}
	} else if *record {
		if testutil.ProjID() == "" {
			log.Fatal("must record with a project ID")
		}
		// Store the project ID and wall-clock time alongside the recording
		// so a replay can reconstruct both.
		b, err := json.Marshal(replayInfo{
			ProjectID: testutil.ProjID(),
			Time:      timeNow,
		})
		if err != nil {
			log.Fatal(err)
		}
		rec, err := rpcreplay.NewRecorder(replayFilename, b)
		if err != nil {
			log.Fatal(err)
		}
		// Flushed via defer — this is why TestMain routes through testMain.
		defer func() {
			if err := rec.Close(); err != nil {
				log.Fatalf("closing recorder: %v", err)
			}
		}()
		newTestClient = func(ctx context.Context, t *testing.T) *Client {
			return newClient(ctx, t, rec.DialOptions())
		}
		log.Printf("recording to %s", replayFilename)
	}
	// Derive the key-name suffix from timeNow (possibly restored by
	// initReplay) so replayed runs generate the same keys as the
	// recorded run.
	suffix = fmt.Sprintf("-t%d", timeNow.UnixNano())
	return m.Run()
}
// initReplay switches the test suite into replay mode: it opens the
// recorded RPC log, restores the project ID and timestamp captured at
// record time, and repoints newTestClient at a connection whose
// interceptors replay the recorded responses.
func initReplay() {
	rep, err := rpcreplay.NewReplayer(replayFilename)
	if err != nil {
		log.Fatal(err)
	}
	defer rep.Close()
	var ri replayInfo
	if err := json.Unmarshal(rep.Initial(), &ri); err != nil {
		log.Fatalf("unmarshaling initial replay info: %v", err)
	}
	// Use the recorded timestamp (in local time) so key suffixes and
	// time-based entity fields match the recorded run.
	timeNow = ri.Time.In(time.Local)
	conn, err := replayConn(rep)
	if err != nil {
		log.Fatal(err)
	}
	newTestClient = func(ctx context.Context, t *testing.T) *Client {
		client, err := NewClient(ctx, ri.ProjectID, option.WithGRPCConn(conn))
		if err != nil {
			t.Fatalf("NewClient: %v", err)
		}
		return client
	}
	log.Printf("replaying from %s", replayFilename)
}
// replayConn returns a *grpc.ClientConn that exists only to carry the
// replayer's interceptors; it is closed before being returned, which is
// fine because replaying never sends real traffic over it.
func replayConn(rep *rpcreplay.Replayer) (*grpc.ClientConn, error) {
	// If we make a real connection we need creds from somewhere, and they
	// might not be available, for instance on Travis.
	// Replaying doesn't require a connection live at all, but we need
	// something to attach gRPC interceptors to.
	// So we start a local listener and connect to it, then close them down.
	// TODO(jba): build something like this into the replayer?
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}
	// Always release the listener — the previous code leaked it when
	// grpc.Dial failed, since it was only closed on the success path.
	defer l.Close()
	conn, err := grpc.Dial(l.Addr().String(),
		append([]grpc.DialOption{grpc.WithInsecure()}, rep.DialOptions()...)...)
	if err != nil {
		return nil, err
	}
	conn.Close()
	return conn, nil
}
func newClient(ctx context.Context, t *testing.T, dialOpts []grpc.DialOption) *Client {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ts := testutil.TokenSource(ctx, ScopeDatastore)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
opts := []option.ClientOption{option.WithTokenSource(ts)}
for _, opt := range dialOpts {
opts = append(opts, option.WithGRPCDialOption(opt))
}
client, err := NewClient(ctx, testutil.ProjID(), opts...)
if err != nil {
t.Fatalf("NewClient: %v", err)
}
@ -51,11 +172,8 @@ func newClient(ctx context.Context, t *testing.T) *Client {
}
func TestBasics(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx, _ := context.WithTimeout(context.Background(), time.Second*20)
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
type X struct {
@ -64,7 +182,7 @@ func TestBasics(t *testing.T) {
T time.Time
}
x0 := X{66, "99", time.Now().Truncate(time.Millisecond)}
x0 := X{66, "99", timeNow.Truncate(time.Millisecond)}
k, err := client.Put(ctx, IncompleteKey("BasicsX", nil), &x0)
if err != nil {
t.Fatalf("client.Put: %v", err)
@ -78,18 +196,14 @@ func TestBasics(t *testing.T) {
if err != nil {
t.Errorf("client.Delete: %v", err)
}
if !reflect.DeepEqual(x0, x1) {
if !testutil.Equal(x0, x1) {
t.Errorf("compare: x0=%v, x1=%v", x0, x1)
}
}
func TestTopLevelKeyLoaded(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx, _ := context.WithTimeout(context.Background(), time.Second*20)
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
completeKey := NameKey("EntityWithKey", "myent", nil)
@ -117,18 +231,15 @@ func TestTopLevelKeyLoaded(t *testing.T) {
}
// The two keys should be absolutely identical.
if !reflect.DeepEqual(e.K, k) {
if !testutil.Equal(e.K, k) {
t.Fatalf("e.K not equal to k; got %#v, want %#v", e.K, k)
}
}
func TestListValues(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
p0 := PropertyList{
@ -142,7 +253,7 @@ func TestListValues(t *testing.T) {
if err := client.Get(ctx, k, &p1); err != nil {
t.Errorf("client.Get: %v", err)
}
if !reflect.DeepEqual(p0, p1) {
if !testutil.Equal(p0, p1) {
t.Errorf("compare:\np0=%v\np1=%#v", p0, p1)
}
if err = client.Delete(ctx, k); err != nil {
@ -151,11 +262,8 @@ func TestListValues(t *testing.T) {
}
func TestGetMulti(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
type X struct {
@ -225,11 +333,8 @@ func (z Z) String() string {
}
func TestUnindexableValues(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
x1500 := strings.Repeat("x", 1500)
@ -256,11 +361,8 @@ func TestUnindexableValues(t *testing.T) {
}
func TestNilKey(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
testCases := []struct {
@ -341,15 +443,12 @@ func testSmallQueries(t *testing.T, ctx context.Context, client *Client, parent
}
func TestFilters(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
parent := NameKey("SQParent", "TestFilters"+suffix, nil)
now := time.Now().Truncate(time.Millisecond).Unix()
now := timeNow.Truncate(time.Millisecond).Unix()
children := []*SQChild{
{I: 0, T: now, U: now},
{I: 1, T: now, U: now},
@ -402,7 +501,7 @@ func TestFilters(t *testing.T) {
if err != nil {
t.Errorf("client.GetAll: %v", err)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("compare: got=%v, want=%v", got, want)
}
}, func() {
@ -421,22 +520,21 @@ func TestFilters(t *testing.T) {
if err != nil {
t.Errorf("client.GetAll: %v", err)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("compare: got=%v, want=%v", got, want)
}
})
}
type ckey struct{}
func TestLargeQuery(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
parent := NameKey("LQParent", "TestFilters"+suffix, nil)
now := time.Now().Truncate(time.Millisecond).Unix()
now := timeNow.Truncate(time.Millisecond).Unix()
// Make a large number of children entities.
const n = 800
@ -552,6 +650,7 @@ func TestLargeQuery(t *testing.T) {
go func(count, limit, offset, want int) {
defer wg.Done()
ctx := context.WithValue(ctx, ckey{}, fmt.Sprintf("c=%d,l=%d,o=%d", count, limit, offset))
// Run iterator through count calls to Next.
it := client.Run(ctx, q.Limit(limit).Offset(offset).KeysOnly())
for i := 0; i < count; i++ {
@ -588,7 +687,6 @@ func TestLargeQuery(t *testing.T) {
}
}(tt.count, tt.limit, tt.offset, tt.want)
}
wg.Wait()
}
@ -596,15 +694,12 @@ func TestEventualConsistency(t *testing.T) {
// TODO(jba): either make this actually test eventual consistency, or
// delete it. Currently it behaves the same with or without the
// EventualConsistency call.
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
parent := NameKey("SQParent", "TestEventualConsistency"+suffix, nil)
now := time.Now().Truncate(time.Millisecond).Unix()
now := timeNow.Truncate(time.Millisecond).Unix()
children := []*SQChild{
{I: 0, T: now, U: now},
{I: 1, T: now, U: now},
@ -623,15 +718,12 @@ func TestEventualConsistency(t *testing.T) {
}
func TestProjection(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
parent := NameKey("SQParent", "TestProjection"+suffix, nil)
now := time.Now().Truncate(time.Millisecond).Unix()
now := timeNow.Truncate(time.Millisecond).Unix()
children := []*SQChild{
{I: 1 << 0, J: 100, T: now, U: now},
{I: 1 << 1, J: 100, T: now, U: now},
@ -669,11 +761,8 @@ func TestProjection(t *testing.T) {
}
func TestAllocateIDs(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
keys := make([]*Key, 5)
@ -695,11 +784,8 @@ func TestAllocateIDs(t *testing.T) {
}
func TestGetAllWithFieldMismatch(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
type Fat struct {
@ -730,10 +816,10 @@ func TestGetAllWithFieldMismatch(t *testing.T) {
{X: 22},
}
getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got)
if len(getKeys) != 3 && !reflect.DeepEqual(getKeys, putKeys) {
if len(getKeys) != 3 && !testutil.Equal(getKeys, putKeys) {
t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want)
}
if _, ok := err.(*ErrFieldMismatch); !ok {
@ -742,11 +828,8 @@ func TestGetAllWithFieldMismatch(t *testing.T) {
}
func TestKindlessQueries(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
type Dee struct {
@ -858,7 +941,7 @@ loop:
got = append(got, dst.I)
}
sort.Ints(got)
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want)
continue
}
@ -866,11 +949,8 @@ loop:
}
func TestTransaction(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
type Counter struct {
@ -914,7 +994,7 @@ func TestTransaction(t *testing.T) {
for i, tt := range tests {
// Put a new counter.
c := &Counter{N: 10, T: time.Now()}
c := &Counter{N: 10, T: timeNow}
key, err := client.Put(ctx, IncompleteKey("TransCounter", nil), c)
if err != nil {
t.Errorf("%s: client.Put: %v", tt.desc, err)
@ -972,11 +1052,8 @@ func TestTransaction(t *testing.T) {
}
func TestNilPointers(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
type X struct {
@ -994,7 +1071,7 @@ func TestNilPointers(t *testing.T) {
xs := make([]*X, 2)
if err := client.GetMulti(ctx, keys, xs); err != nil {
t.Errorf("GetMulti: %v", err)
} else if !reflect.DeepEqual(xs, src) {
} else if !testutil.Equal(xs, src) {
t.Errorf("GetMulti fetched %v, want %v", xs, src)
}
@ -1012,11 +1089,8 @@ func TestNilPointers(t *testing.T) {
}
func TestNestedRepeatedElementNoIndex(t *testing.T) {
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
ctx := context.Background()
client := newClient(ctx, t)
client := newTestClient(ctx, t)
defer client.Close()
type Inner struct {

View File

@ -18,6 +18,8 @@ import (
"reflect"
"testing"
"cloud.google.com/go/internal/testutil"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
@ -164,7 +166,7 @@ func TestLoadEntityNestedLegacy(t *testing.T) {
continue
}
if !reflect.DeepEqual(tc.want, dst) {
if !testutil.Equal(tc.want, dst) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
}
}
@ -407,7 +409,7 @@ func TestLoadEntityNested(t *testing.T) {
continue
}
if !reflect.DeepEqual(tc.want, dst) {
if !testutil.Equal(tc.want, dst) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
}
}
@ -503,7 +505,7 @@ func TestAlreadyPopulatedDst(t *testing.T) {
continue
}
if !reflect.DeepEqual(tc.want, tc.dst) {
if !testutil.Equal(tc.want, tc.dst) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, tc.dst, tc.want)
}
}
@ -748,7 +750,7 @@ func TestKeyLoader(t *testing.T) {
continue
}
if !reflect.DeepEqual(tc.want, tc.dst) {
if !testutil.Equal(tc.want, tc.dst) {
t.Errorf("%s: compare:\ngot: %+v\nwant: %+v", tc.desc, tc.dst, tc.want)
}
}

View File

@ -21,7 +21,10 @@ import (
"sort"
"testing"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/proto"
"github.com/google/go-cmp/cmp"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
@ -334,7 +337,7 @@ func TestSimpleQuery(t *testing.T) {
}
}
if !reflect.DeepEqual(tc.dst, tc.want) {
if !testutil.Equal(tc.dst, tc.want) {
t.Errorf("dst type %T: Entities\ngot %+v\nwant %+v", tc.dst, tc.dst, tc.want)
continue
}
@ -357,10 +360,10 @@ func TestQueriesAreImmutable(t *testing.T) {
q0 := NewQuery("foo")
q1 := NewQuery("foo")
q2 := q1.Offset(2)
if !reflect.DeepEqual(q0, q1) {
if !testutil.Equal(q0, q1, cmp.AllowUnexported(Query{})) {
t.Errorf("q0 and q1 were not equal")
}
if reflect.DeepEqual(q1, q2) {
if testutil.Equal(q1, q2, cmp.AllowUnexported(Query{})) {
t.Errorf("q1 and q2 were equal")
}
@ -381,10 +384,10 @@ func TestQueriesAreImmutable(t *testing.T) {
q4 := f()
q5 := q4.Order("y")
q6 := q4.Order("z")
if !reflect.DeepEqual(q3, q5) {
if !testutil.Equal(q3, q5, cmp.AllowUnexported(Query{})) {
t.Errorf("q3 and q5 were not equal")
}
if reflect.DeepEqual(q5, q6) {
if testutil.Equal(q5, q6, cmp.AllowUnexported(Query{})) {
t.Errorf("q5 and q6 were equal")
}
}

View File

@ -15,9 +15,10 @@
package datastore
import (
"reflect"
"testing"
"cloud.google.com/go/internal/testutil"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
@ -187,7 +188,7 @@ func TestSaveEntityNested(t *testing.T) {
continue
}
if !reflect.DeepEqual(tc.want, got) {
if !testutil.Equal(tc.want, got) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, got, tc.want)
}
}

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -77,8 +77,8 @@ type Controller2Client struct {
// The call options for this service.
CallOptions *Controller2CallOptions
// The metadata to be sent with each request.
Metadata metadata.MD
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewController2Client creates a new controller2 client.
@ -135,7 +135,7 @@ func (c *Controller2Client) Close() error {
func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// RegisterDebuggee registers the debuggee with the controller service.
@ -149,7 +149,7 @@ func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
// from data loss, or change the debuggee_id format. Agents must handle
// debuggee_id value changing upon re-registration.
func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...)
var resp *clouddebuggerpb.RegisterDebuggeeResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -177,7 +177,7 @@ func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebu
// until the controller removes them from the active list to avoid
// setting those breakpoints again.
func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListActiveBreakpoints[0:len(c.CallOptions.ListActiveBreakpoints):len(c.CallOptions.ListActiveBreakpoints)], opts...)
var resp *clouddebuggerpb.ListActiveBreakpointsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -200,7 +200,7 @@ func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clou
// semantics. These may only make changes such as canonicalizing a value
// or snapping the location to the correct line of code.
func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.UpdateActiveBreakpoint[0:len(c.CallOptions.UpdateActiveBreakpoint):len(c.CallOptions.UpdateActiveBreakpoint)], opts...)
var resp *clouddebuggerpb.UpdateActiveBreakpointResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -40,7 +40,7 @@ func ExampleController2Client_RegisterDebuggee() {
}
req := &clouddebuggerpb.RegisterDebuggeeRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.RegisterDebuggee(ctx, req)
if err != nil {
@ -58,7 +58,7 @@ func ExampleController2Client_ListActiveBreakpoints() {
}
req := &clouddebuggerpb.ListActiveBreakpointsRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.ListActiveBreakpoints(ctx, req)
if err != nil {
@ -76,7 +76,7 @@ func ExampleController2Client_UpdateActiveBreakpoint() {
}
req := &clouddebuggerpb.UpdateActiveBreakpointRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.UpdateActiveBreakpoint(ctx, req)
if err != nil {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -81,8 +81,8 @@ type Debugger2Client struct {
// The call options for this service.
CallOptions *Debugger2CallOptions
// The metadata to be sent with each request.
Metadata metadata.MD
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewDebugger2Client creates a new debugger2 client.
@ -131,12 +131,12 @@ func (c *Debugger2Client) Close() error {
func (c *Debugger2Client) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// SetBreakpoint sets the breakpoint to the debuggee.
func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.SetBreakpointResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetBreakpoint[0:len(c.CallOptions.SetBreakpoint):len(c.CallOptions.SetBreakpoint)], opts...)
var resp *clouddebuggerpb.SetBreakpointResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -152,7 +152,7 @@ func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerp
// GetBreakpoint gets breakpoint information.
func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.GetBreakpointResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.GetBreakpoint[0:len(c.CallOptions.GetBreakpoint):len(c.CallOptions.GetBreakpoint)], opts...)
var resp *clouddebuggerpb.GetBreakpointResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -168,7 +168,7 @@ func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerp
// DeleteBreakpoint deletes the breakpoint from the debuggee.
func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest, opts ...gax.CallOption) error {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.DeleteBreakpoint[0:len(c.CallOptions.DeleteBreakpoint):len(c.CallOptions.DeleteBreakpoint)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@ -180,7 +180,7 @@ func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebugg
// ListBreakpoints lists all breakpoints for the debuggee.
func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListBreakpointsResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListBreakpoints[0:len(c.CallOptions.ListBreakpoints):len(c.CallOptions.ListBreakpoints)], opts...)
var resp *clouddebuggerpb.ListBreakpointsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -196,7 +196,7 @@ func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebugge
// ListDebuggees lists all the debuggees that the user has access to.
func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...)
var resp *clouddebuggerpb.ListDebuggeesResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -40,7 +40,7 @@ func ExampleDebugger2Client_SetBreakpoint() {
}
req := &clouddebuggerpb.SetBreakpointRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.SetBreakpoint(ctx, req)
if err != nil {
@ -58,7 +58,7 @@ func ExampleDebugger2Client_GetBreakpoint() {
}
req := &clouddebuggerpb.GetBreakpointRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.GetBreakpoint(ctx, req)
if err != nil {
@ -76,7 +76,7 @@ func ExampleDebugger2Client_DeleteBreakpoint() {
}
req := &clouddebuggerpb.DeleteBreakpointRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
err = c.DeleteBreakpoint(ctx, req)
if err != nil {
@ -92,7 +92,7 @@ func ExampleDebugger2Client_ListBreakpoints() {
}
req := &clouddebuggerpb.ListBreakpointsRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.ListBreakpoints(ctx, req)
if err != nil {
@ -110,7 +110,7 @@ func ExampleDebugger2Client_ListDebuggees() {
}
req := &clouddebuggerpb.ListDebuggeesRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.ListDebuggees(ctx, req)
if err != nil {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -17,7 +17,7 @@
// Package debugger is an auto-generated package for the
// Stackdriver Debugger API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Examines the call stack and variables of a running application
// without stopping or slowing it down.

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -21,6 +21,7 @@ import (
)
import (
"fmt"
"strconv"
"testing"
"time"
@ -31,6 +32,7 @@ import (
"google.golang.org/api/option"
)
var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -35,10 +35,10 @@ import (
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
DeidentifyContent []gax.CallOption
AnalyzeDataSourceRisk []gax.CallOption
InspectContent []gax.CallOption
RedactContent []gax.CallOption
DeidentifyContent []gax.CallOption
AnalyzeDataSourceRisk []gax.CallOption
CreateInspectOperation []gax.CallOption
ListInspectFindings []gax.CallOption
ListInfoTypes []gax.CallOption
@ -68,10 +68,10 @@ func defaultCallOptions() *CallOptions {
},
}
return &CallOptions{
DeidentifyContent: retry[[2]string{"default", "idempotent"}],
AnalyzeDataSourceRisk: retry[[2]string{"default", "idempotent"}],
InspectContent: retry[[2]string{"default", "non_idempotent"}],
RedactContent: retry[[2]string{"default", "non_idempotent"}],
DeidentifyContent: retry[[2]string{"default", "idempotent"}],
AnalyzeDataSourceRisk: retry[[2]string{"default", "idempotent"}],
CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}],
ListInspectFindings: retry[[2]string{"default", "idempotent"}],
ListInfoTypes: retry[[2]string{"default", "idempotent"}],
@ -95,8 +95,8 @@ type Client struct {
// The call options for this service.
CallOptions *CallOptions
// The metadata to be sent with each request.
Metadata metadata.MD
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewClient creates a new dlp service client.
@ -150,57 +150,13 @@ func (c *Client) Close() error {
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ResultPath returns the path for the result resource.
func ResultPath(result string) string {
return "" +
"inspect/results/" +
result +
""
}
// DeidentifyContent de-identifies potentially sensitive info from a list of strings.
// This method has limits on input size and output size.
func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...)
var resp *dlppb.DeidentifyContentResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google
// Cloud Platform repository.
func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*AnalyzeDataSourceRiskOperation, error) {
ctx = insertMetadata(ctx, c.Metadata)
opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &AnalyzeDataSourceRiskOperation{
lro: longrunning.InternalNewOperation(c.LROClient, resp),
}, nil
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// InspectContent finds potentially sensitive info in a list of strings.
// This method has limits on input size, processing time, and output size.
func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...)
var resp *dlppb.InspectContentResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -217,7 +173,7 @@ func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRe
// RedactContent redacts potentially sensitive info from a list of strings.
// This method has limits on input size, processing time, and output size.
func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.RedactContent[0:len(c.CallOptions.RedactContent):len(c.CallOptions.RedactContent)], opts...)
var resp *dlppb.RedactContentResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -231,10 +187,46 @@ func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequ
return resp, nil
}
// DeidentifyContent de-identifies potentially sensitive info from a list of strings.
// This method has limits on input size and output size.
func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...)
var resp *dlppb.DeidentifyContentResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google
// Cloud Platform repository.
func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*AnalyzeDataSourceRiskOperation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &AnalyzeDataSourceRiskOperation{
lro: longrunning.InternalNewOperation(c.LROClient, resp),
}, nil
}
// CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data
// repository.
func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.CreateInspectOperation[0:len(c.CallOptions.CreateInspectOperation):len(c.CallOptions.CreateInspectOperation)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -252,7 +244,7 @@ func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateIn
// ListInspectFindings returns list of results for given inspect operation result set id.
func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListInspectFindings[0:len(c.CallOptions.ListInspectFindings):len(c.CallOptions.ListInspectFindings)], opts...)
var resp *dlppb.ListInspectFindingsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -268,7 +260,7 @@ func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspect
// ListInfoTypes returns sensitive information types for given category.
func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...)
var resp *dlppb.ListInfoTypesResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -284,7 +276,7 @@ func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequ
// ListRootCategories returns the list of root categories of sensitive information.
func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...)
var resp *dlppb.ListRootCategoriesResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -32,47 +32,6 @@ func ExampleNewClient() {
_ = c
}
func ExampleClient_DeidentifyContent() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.DeidentifyContentRequest{
// TODO: Fill request struct fields.
}
resp, err := c.DeidentifyContent(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_AnalyzeDataSourceRisk() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.AnalyzeDataSourceRiskRequest{
// TODO: Fill request struct fields.
}
op, err := c.AnalyzeDataSourceRisk(ctx, req)
if err != nil {
// TODO: Handle error.
}
resp, err := op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_InspectContent() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
@ -81,7 +40,7 @@ func ExampleClient_InspectContent() {
}
req := &dlppb.InspectContentRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.InspectContent(ctx, req)
if err != nil {
@ -99,7 +58,7 @@ func ExampleClient_RedactContent() {
}
req := &dlppb.RedactContentRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.RedactContent(ctx, req)
if err != nil {
@ -109,6 +68,47 @@ func ExampleClient_RedactContent() {
_ = resp
}
func ExampleClient_DeidentifyContent() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.DeidentifyContentRequest{
// TODO: Fill request struct fields.
}
resp, err := c.DeidentifyContent(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_AnalyzeDataSourceRisk() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.AnalyzeDataSourceRiskRequest{
// TODO: Fill request struct fields.
}
op, err := c.AnalyzeDataSourceRisk(ctx, req)
if err != nil {
// TODO: Handle error.
}
resp, err := op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_CreateInspectOperation() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
@ -117,7 +117,7 @@ func ExampleClient_CreateInspectOperation() {
}
req := &dlppb.CreateInspectOperationRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
op, err := c.CreateInspectOperation(ctx, req)
if err != nil {
@ -140,7 +140,7 @@ func ExampleClient_ListInspectFindings() {
}
req := &dlppb.ListInspectFindingsRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.ListInspectFindings(ctx, req)
if err != nil {
@ -158,7 +158,7 @@ func ExampleClient_ListInfoTypes() {
}
req := &dlppb.ListInfoTypesRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.ListInfoTypes(ctx, req)
if err != nil {
@ -176,7 +176,7 @@ func ExampleClient_ListRootCategories() {
}
req := &dlppb.ListRootCategoriesRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.ListRootCategories(ctx, req)
if err != nil {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -17,7 +17,7 @@
// Package dlp is an auto-generated package for the
// DLP API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// The Google Data Loss Prevention API provides methods for detection of
// privacy-sensitive fragments in text, images, and Google Cloud Platform

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -186,156 +186,6 @@ func TestMain(m *testing.M) {
os.Exit(m.Run())
}
func TestDlpServiceDeidentifyContent(t *testing.T) {
var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{}
mockDlp.err = nil
mockDlp.reqs = nil
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var items []*dlppb.ContentItem = nil
var request = &dlppb.DeidentifyContentRequest{
DeidentifyConfig: deidentifyConfig,
InspectConfig: inspectConfig,
Items: items,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.DeidentifyContent(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceDeidentifyContentError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error")
var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var items []*dlppb.ContentItem = nil
var request = &dlppb.DeidentifyContentRequest{
DeidentifyConfig: deidentifyConfig,
InspectConfig: inspectConfig,
Items: items,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.DeidentifyContent(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) {
var expectedResponse *dlppb.RiskAnalysisOperationResult = &dlppb.RiskAnalysisOperationResult{}
mockDlp.err = nil
mockDlp.reqs = nil
any, err := ptypes.MarshalAny(expectedResponse)
if err != nil {
t.Fatal(err)
}
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Response{Response: any},
})
var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
var request = &dlppb.AnalyzeDataSourceRiskRequest{
PrivacyMetric: privacyMetric,
SourceTable: sourceTable,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = nil
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Error{
Error: &status.Status{
Code: int32(errCode),
Message: "test error",
},
},
})
var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
var request = &dlppb.AnalyzeDataSourceRiskRequest{
PrivacyMetric: privacyMetric,
SourceTable: sourceTable,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestDlpServiceInspectContent(t *testing.T) {
var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{}
@ -538,6 +388,156 @@ func TestDlpServiceRedactContentError(t *testing.T) {
}
_ = resp
}
func TestDlpServiceDeidentifyContent(t *testing.T) {
var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{}
mockDlp.err = nil
mockDlp.reqs = nil
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var items []*dlppb.ContentItem = nil
var request = &dlppb.DeidentifyContentRequest{
DeidentifyConfig: deidentifyConfig,
InspectConfig: inspectConfig,
Items: items,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.DeidentifyContent(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceDeidentifyContentError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error")
var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var items []*dlppb.ContentItem = nil
var request = &dlppb.DeidentifyContentRequest{
DeidentifyConfig: deidentifyConfig,
InspectConfig: inspectConfig,
Items: items,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.DeidentifyContent(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) {
var expectedResponse *dlppb.RiskAnalysisOperationResult = &dlppb.RiskAnalysisOperationResult{}
mockDlp.err = nil
mockDlp.reqs = nil
any, err := ptypes.MarshalAny(expectedResponse)
if err != nil {
t.Fatal(err)
}
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Response{Response: any},
})
var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
var request = &dlppb.AnalyzeDataSourceRiskRequest{
PrivacyMetric: privacyMetric,
SourceTable: sourceTable,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = nil
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Error{
Error: &status.Status{
Code: int32(errCode),
Message: "test error",
},
},
})
var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
var request = &dlppb.AnalyzeDataSourceRiskRequest{
PrivacyMetric: privacyMetric,
SourceTable: sourceTable,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestDlpServiceCreateInspectOperation(t *testing.T) {
var name2 string = "name2-1052831874"
var expectedResponse = &dlppb.InspectOperationResult{
@ -678,7 +678,7 @@ func TestDlpServiceListInspectFindings(t *testing.T) {
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var formattedName string = ResultPath("[RESULT]")
var formattedName string = fmt.Sprintf("inspect/results/%s", "[RESULT]")
var request = &dlppb.ListInspectFindingsRequest{
Name: formattedName,
}
@ -707,7 +707,7 @@ func TestDlpServiceListInspectFindingsError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error")
var formattedName string = ResultPath("[RESULT]")
var formattedName string = fmt.Sprintf("inspect/results/%s", "[RESULT]")
var request = &dlppb.ListInspectFindingsRequest{
Name: formattedName,
}

View File

@ -0,0 +1,27 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dlp
// ResultPath returns the path for the result resource.
//
// Deprecated: Use
// fmt.Sprintf("inspect/results/%s", result)
// instead.
func ResultPath(result string) string {
return "" +
"inspect/results/" +
result +
""
}

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -21,6 +21,7 @@ import (
)
import (
"fmt"
"strconv"
"testing"
"time"
@ -31,6 +32,7 @@ import (
"google.golang.org/api/option"
)
var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
@ -53,7 +55,7 @@ func TestReportErrorsServiceSmoke(t *testing.T) {
t.Fatal(err)
}
var formattedProjectName string = ReportErrorsProjectPath(projectId)
var formattedProjectName string = fmt.Sprintf("projects/%s", projectId)
var message string = "[MESSAGE]"
var service string = "[SERVICE]"
var serviceContext = &clouderrorreportingpb.ServiceContext{

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -17,7 +17,7 @@
// Package errorreporting is an auto-generated package for the
// Stackdriver Error Reporting API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Stackdriver Error Reporting groups and counts similar errors from cloud
// services. The Stackdriver Error Reporting API provides a way to report new

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -75,8 +75,8 @@ type ErrorGroupClient struct {
// The call options for this service.
CallOptions *ErrorGroupCallOptions
// The metadata to be sent with each request.
Metadata metadata.MD
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewErrorGroupClient creates a new error group service client.
@ -114,22 +114,12 @@ func (c *ErrorGroupClient) Close() error {
func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ErrorGroupGroupPath returns the path for the group resource.
func ErrorGroupGroupPath(project, group string) string {
return "" +
"projects/" +
project +
"/groups/" +
group +
""
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// GetGroup get the specified group.
func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...)
var resp *clouderrorreportingpb.ErrorGroup
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -146,7 +136,7 @@ func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportin
// UpdateGroup replace the data for the specified group.
// Fails if the group does not exist.
func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...)
var resp *clouderrorreportingpb.ErrorGroup
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -40,7 +40,7 @@ func ExampleErrorGroupClient_GetGroup() {
}
req := &clouderrorreportingpb.GetGroupRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.GetGroup(ctx, req)
if err != nil {
@ -58,7 +58,7 @@ func ExampleErrorGroupClient_UpdateGroup() {
}
req := &clouderrorreportingpb.UpdateGroupRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.UpdateGroup(ctx, req)
if err != nil {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -79,8 +79,8 @@ type ErrorStatsClient struct {
// The call options for this service.
CallOptions *ErrorStatsCallOptions
// The metadata to be sent with each request.
Metadata metadata.MD
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewErrorStatsClient creates a new error stats service client.
@ -119,20 +119,12 @@ func (c *ErrorStatsClient) Close() error {
func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ErrorStatsProjectPath returns the path for the project resource.
func ErrorStatsProjectPath(project string) string {
return "" +
"projects/" +
project +
""
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ListGroupStats lists the specified groups.
func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest, opts ...gax.CallOption) *ErrorGroupStatsIterator {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListGroupStats[0:len(c.CallOptions.ListGroupStats):len(c.CallOptions.ListGroupStats)], opts...)
it := &ErrorGroupStatsIterator{}
it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) {
@ -167,7 +159,7 @@ func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorre
// ListEvents lists the specified events.
func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest, opts ...gax.CallOption) *ErrorEventIterator {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListEvents[0:len(c.CallOptions.ListEvents):len(c.CallOptions.ListEvents)], opts...)
it := &ErrorEventIterator{}
it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) {
@ -202,7 +194,7 @@ func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreport
// DeleteEvents deletes all error events of a given project.
func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest, opts ...gax.CallOption) (*clouderrorreportingpb.DeleteEventsResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.DeleteEvents[0:len(c.CallOptions.DeleteEvents):len(c.CallOptions.DeleteEvents)], opts...)
var resp *clouderrorreportingpb.DeleteEventsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -41,7 +41,7 @@ func ExampleErrorStatsClient_ListGroupStats() {
}
req := &clouderrorreportingpb.ListGroupStatsRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
it := c.ListGroupStats(ctx, req)
for {
@ -65,7 +65,7 @@ func ExampleErrorStatsClient_ListEvents() {
}
req := &clouderrorreportingpb.ListEventsRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
it := c.ListEvents(ctx, req)
for {
@ -89,7 +89,7 @@ func ExampleErrorStatsClient_DeleteEvents() {
}
req := &clouderrorreportingpb.DeleteEventsRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.DeleteEvents(ctx, req)
if err != nil {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -208,7 +208,7 @@ func TestErrorGroupServiceGetGroup(t *testing.T) {
mockErrorGroup.resps = append(mockErrorGroup.resps[:0], expectedResponse)
var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]")
var formattedGroupName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]")
var request = &clouderrorreportingpb.GetGroupRequest{
GroupName: formattedGroupName,
}
@ -237,7 +237,7 @@ func TestErrorGroupServiceGetGroupError(t *testing.T) {
errCode := codes.PermissionDenied
mockErrorGroup.err = gstatus.Error(errCode, "test error")
var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]")
var formattedGroupName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]")
var request = &clouderrorreportingpb.GetGroupRequest{
GroupName: formattedGroupName,
}
@ -331,7 +331,7 @@ func TestErrorStatsServiceListGroupStats(t *testing.T) {
mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse)
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{}
var request = &clouderrorreportingpb.ListGroupStatsRequest{
ProjectName: formattedProjectName,
@ -372,7 +372,7 @@ func TestErrorStatsServiceListGroupStatsError(t *testing.T) {
errCode := codes.PermissionDenied
mockErrorStats.err = gstatus.Error(errCode, "test error")
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{}
var request = &clouderrorreportingpb.ListGroupStatsRequest{
ProjectName: formattedProjectName,
@ -407,7 +407,7 @@ func TestErrorStatsServiceListEvents(t *testing.T) {
mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse)
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
var groupId string = "groupId506361563"
var request = &clouderrorreportingpb.ListEventsRequest{
ProjectName: formattedProjectName,
@ -448,7 +448,7 @@ func TestErrorStatsServiceListEventsError(t *testing.T) {
errCode := codes.PermissionDenied
mockErrorStats.err = gstatus.Error(errCode, "test error")
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
var groupId string = "groupId506361563"
var request = &clouderrorreportingpb.ListEventsRequest{
ProjectName: formattedProjectName,
@ -477,7 +477,7 @@ func TestErrorStatsServiceDeleteEvents(t *testing.T) {
mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse)
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
var request = &clouderrorreportingpb.DeleteEventsRequest{
ProjectName: formattedProjectName,
}
@ -506,7 +506,7 @@ func TestErrorStatsServiceDeleteEventsError(t *testing.T) {
errCode := codes.PermissionDenied
mockErrorStats.err = gstatus.Error(errCode, "test error")
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
var request = &clouderrorreportingpb.DeleteEventsRequest{
ProjectName: formattedProjectName,
}
@ -533,7 +533,7 @@ func TestReportErrorsServiceReportErrorEvent(t *testing.T) {
mockReportErrors.resps = append(mockReportErrors.resps[:0], expectedResponse)
var formattedProjectName string = ReportErrorsProjectPath("[PROJECT]")
var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{}
var request = &clouderrorreportingpb.ReportErrorEventRequest{
ProjectName: formattedProjectName,
@ -564,7 +564,7 @@ func TestReportErrorsServiceReportErrorEventError(t *testing.T) {
errCode := codes.PermissionDenied
mockReportErrors.err = gstatus.Error(errCode, "test error")
var formattedProjectName string = ReportErrorsProjectPath("[PROJECT]")
var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{}
var request = &clouderrorreportingpb.ReportErrorEventRequest{
ProjectName: formattedProjectName,

View File

@ -0,0 +1,51 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting
// ResultPath returns the path for the result resource.
//
// Deprecated: Use
// fmt.Sprintf("inspect/results/%s", result)
// instead.
func ResultPath(result string) string {
return "" +
"inspect/results/" +
result +
""
}
// ErrorStatsProjectPath returns the path for the project resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s", project)
// instead.
func ErrorStatsProjectPath(project string) string {
return "" +
"projects/" +
project +
""
}
// ReportErrorsProjectPath returns the path for the project resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s", project)
// instead.
func ReportErrorsProjectPath(project string) string {
return "" +
"projects/" +
project +
""
}

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -57,8 +57,8 @@ type ReportErrorsClient struct {
// The call options for this service.
CallOptions *ReportErrorsCallOptions
// The metadata to be sent with each request.
Metadata metadata.MD
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewReportErrorsClient creates a new report errors service client.
@ -96,15 +96,7 @@ func (c *ReportErrorsClient) Close() error {
func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ReportErrorsProjectPath returns the path for the project resource.
func ReportErrorsProjectPath(project string) string {
return "" +
"projects/" +
project +
""
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ReportErrorEvent report an individual error event.
@ -115,7 +107,7 @@ func ReportErrorsProjectPath(project string) string {
// for authentication. To use an API key, append it to the URL as the value of
// a key parameter. For example:<pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...)
var resp *clouderrorreportingpb.ReportErrorEventResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -40,7 +40,7 @@ func ExampleReportErrorsClient_ReportErrorEvent() {
}
req := &clouderrorreportingpb.ReportErrorEventRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.ReportErrorEvent(ctx, req)
if err != nil {

View File

@ -61,6 +61,7 @@ type Config struct {
type Entry struct {
Error error
Req *http.Request // if error is associated with a request.
Stack []byte // if user does not provide a stack trace, runtime.Stack will be called
}
// Client represents a Google Cloud Error Reporting client.
@ -139,13 +140,21 @@ func (c *Client) Close() error {
// Report writes an error report. It doesn't block. Errors in
// writing the error report can be handled via Client.OnError.
func (c *Client) Report(e Entry) {
req := c.makeReportErrorEventRequest(e.Req, e.Error.Error())
var stack string
if e.Stack != nil {
stack = string(e.Stack)
}
req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack)
c.bundler.Add(req, 1)
}
// ReportSync writes an error report. It blocks until the entry is written.
func (c *Client) ReportSync(ctx context.Context, e Entry) error {
req := c.makeReportErrorEventRequest(e.Req, e.Error.Error())
var stack string
if e.Stack != nil {
stack = string(e.Stack)
}
req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack)
_, err := c.apiClient.ReportErrorEvent(ctx, req)
return err
}
@ -159,11 +168,13 @@ func (c *Client) Flush() {
c.bundler.Flush()
}
func (c *Client) makeReportErrorEventRequest(r *http.Request, msg string) *erpb.ReportErrorEventRequest {
// limit the stack trace to 16k.
var buf [16 * 1024]byte
stack := buf[0:runtime.Stack(buf[:], false)]
message := msg + "\n" + chopStack(stack)
func (c *Client) makeReportErrorEventRequest(r *http.Request, msg string, stack string) *erpb.ReportErrorEventRequest {
if stack == "" {
// limit the stack trace to 16k.
var buf [16 * 1024]byte
stack = chopStack(buf[0:runtime.Stack(buf[:], false)])
}
message := msg + "\n" + stack
var errorContext *erpb.ErrorContext
if r != nil {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -111,8 +111,8 @@ type Client struct {
// The call options for this service.
CallOptions *CallOptions
// The metadata to be sent with each request.
Metadata metadata.MD
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewClient creates a new firestore client.
@ -168,59 +168,12 @@ func (c *Client) Close() error {
func (c *Client) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// DatabaseRootPath returns the path for the database root resource.
func DatabaseRootPath(project, database string) string {
return "" +
"projects/" +
project +
"/databases/" +
database +
""
}
// DocumentRootPath returns the path for the document root resource.
func DocumentRootPath(project, database string) string {
return "" +
"projects/" +
project +
"/databases/" +
database +
"/documents" +
""
}
// DocumentPathPath returns the path for the document path resource.
func DocumentPathPath(project, database, documentPath string) string {
return "" +
"projects/" +
project +
"/databases/" +
database +
"/documents/" +
documentPath +
""
}
// AnyPathPath returns the path for the any path resource.
func AnyPathPath(project, database, document, anyPath string) string {
return "" +
"projects/" +
project +
"/databases/" +
database +
"/documents/" +
document +
"/" +
anyPath +
""
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// GetDocument gets a single document.
func (c *Client) GetDocument(ctx context.Context, req *firestorepb.GetDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.GetDocument[0:len(c.CallOptions.GetDocument):len(c.CallOptions.GetDocument)], opts...)
var resp *firestorepb.Document
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -236,7 +189,7 @@ func (c *Client) GetDocument(ctx context.Context, req *firestorepb.GetDocumentRe
// ListDocuments lists documents.
func (c *Client) ListDocuments(ctx context.Context, req *firestorepb.ListDocumentsRequest, opts ...gax.CallOption) *DocumentIterator {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListDocuments[0:len(c.CallOptions.ListDocuments):len(c.CallOptions.ListDocuments)], opts...)
it := &DocumentIterator{}
it.InternalFetch = func(pageSize int, pageToken string) ([]*firestorepb.Document, string, error) {
@ -271,7 +224,7 @@ func (c *Client) ListDocuments(ctx context.Context, req *firestorepb.ListDocumen
// CreateDocument creates a new document.
func (c *Client) CreateDocument(ctx context.Context, req *firestorepb.CreateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.CreateDocument[0:len(c.CallOptions.CreateDocument):len(c.CallOptions.CreateDocument)], opts...)
var resp *firestorepb.Document
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -287,7 +240,7 @@ func (c *Client) CreateDocument(ctx context.Context, req *firestorepb.CreateDocu
// UpdateDocument updates or inserts a document.
func (c *Client) UpdateDocument(ctx context.Context, req *firestorepb.UpdateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.UpdateDocument[0:len(c.CallOptions.UpdateDocument):len(c.CallOptions.UpdateDocument)], opts...)
var resp *firestorepb.Document
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -303,7 +256,7 @@ func (c *Client) UpdateDocument(ctx context.Context, req *firestorepb.UpdateDocu
// DeleteDocument deletes a document.
func (c *Client) DeleteDocument(ctx context.Context, req *firestorepb.DeleteDocumentRequest, opts ...gax.CallOption) error {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.DeleteDocument[0:len(c.CallOptions.DeleteDocument):len(c.CallOptions.DeleteDocument)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@ -318,7 +271,7 @@ func (c *Client) DeleteDocument(ctx context.Context, req *firestorepb.DeleteDocu
// Documents returned by this method are not guaranteed to be returned in the
// same order that they were requested.
func (c *Client) BatchGetDocuments(ctx context.Context, req *firestorepb.BatchGetDocumentsRequest, opts ...gax.CallOption) (firestorepb.Firestore_BatchGetDocumentsClient, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.BatchGetDocuments[0:len(c.CallOptions.BatchGetDocuments):len(c.CallOptions.BatchGetDocuments)], opts...)
var resp firestorepb.Firestore_BatchGetDocumentsClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -334,7 +287,7 @@ func (c *Client) BatchGetDocuments(ctx context.Context, req *firestorepb.BatchGe
// BeginTransaction starts a new transaction.
func (c *Client) BeginTransaction(ctx context.Context, req *firestorepb.BeginTransactionRequest, opts ...gax.CallOption) (*firestorepb.BeginTransactionResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.BeginTransaction[0:len(c.CallOptions.BeginTransaction):len(c.CallOptions.BeginTransaction)], opts...)
var resp *firestorepb.BeginTransactionResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -350,7 +303,7 @@ func (c *Client) BeginTransaction(ctx context.Context, req *firestorepb.BeginTra
// Commit commits a transaction, while optionally updating documents.
func (c *Client) Commit(ctx context.Context, req *firestorepb.CommitRequest, opts ...gax.CallOption) (*firestorepb.CommitResponse, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.Commit[0:len(c.CallOptions.Commit):len(c.CallOptions.Commit)], opts...)
var resp *firestorepb.CommitResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -366,7 +319,7 @@ func (c *Client) Commit(ctx context.Context, req *firestorepb.CommitRequest, opt
// Rollback rolls back a transaction.
func (c *Client) Rollback(ctx context.Context, req *firestorepb.RollbackRequest, opts ...gax.CallOption) error {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.Rollback[0:len(c.CallOptions.Rollback):len(c.CallOptions.Rollback)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@ -378,7 +331,7 @@ func (c *Client) Rollback(ctx context.Context, req *firestorepb.RollbackRequest,
// RunQuery runs a query.
func (c *Client) RunQuery(ctx context.Context, req *firestorepb.RunQueryRequest, opts ...gax.CallOption) (firestorepb.Firestore_RunQueryClient, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.RunQuery[0:len(c.CallOptions.RunQuery):len(c.CallOptions.RunQuery)], opts...)
var resp firestorepb.Firestore_RunQueryClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -394,7 +347,7 @@ func (c *Client) RunQuery(ctx context.Context, req *firestorepb.RunQueryRequest,
// Write streams batches of document updates and deletes, in order.
func (c *Client) Write(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_WriteClient, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.Write[0:len(c.CallOptions.Write):len(c.CallOptions.Write)], opts...)
var resp firestorepb.Firestore_WriteClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -410,7 +363,7 @@ func (c *Client) Write(ctx context.Context, opts ...gax.CallOption) (firestorepb
// Listen listens to changes.
func (c *Client) Listen(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_ListenClient, error) {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.Listen[0:len(c.CallOptions.Listen):len(c.CallOptions.Listen)], opts...)
var resp firestorepb.Firestore_ListenClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -426,7 +379,7 @@ func (c *Client) Listen(ctx context.Context, opts ...gax.CallOption) (firestorep
// ListCollectionIds lists all the collection IDs underneath a document.
func (c *Client) ListCollectionIds(ctx context.Context, req *firestorepb.ListCollectionIdsRequest, opts ...gax.CallOption) *StringIterator {
ctx = insertMetadata(ctx, c.Metadata)
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListCollectionIds[0:len(c.CallOptions.ListCollectionIds):len(c.CallOptions.ListCollectionIds)], opts...)
it := &StringIterator{}
it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -43,7 +43,7 @@ func ExampleClient_GetDocument() {
}
req := &firestorepb.GetDocumentRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.GetDocument(ctx, req)
if err != nil {
@ -61,7 +61,7 @@ func ExampleClient_ListDocuments() {
}
req := &firestorepb.ListDocumentsRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
it := c.ListDocuments(ctx, req)
for {
@ -85,7 +85,7 @@ func ExampleClient_CreateDocument() {
}
req := &firestorepb.CreateDocumentRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.CreateDocument(ctx, req)
if err != nil {
@ -103,7 +103,7 @@ func ExampleClient_UpdateDocument() {
}
req := &firestorepb.UpdateDocumentRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.UpdateDocument(ctx, req)
if err != nil {
@ -121,7 +121,7 @@ func ExampleClient_DeleteDocument() {
}
req := &firestorepb.DeleteDocumentRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
err = c.DeleteDocument(ctx, req)
if err != nil {
@ -137,7 +137,7 @@ func ExampleClient_BatchGetDocuments() {
}
req := &firestorepb.BatchGetDocumentsRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
stream, err := c.BatchGetDocuments(ctx, req)
if err != nil {
@ -164,7 +164,7 @@ func ExampleClient_BeginTransaction() {
}
req := &firestorepb.BeginTransactionRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.BeginTransaction(ctx, req)
if err != nil {
@ -182,7 +182,7 @@ func ExampleClient_Commit() {
}
req := &firestorepb.CommitRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
resp, err := c.Commit(ctx, req)
if err != nil {
@ -200,7 +200,7 @@ func ExampleClient_Rollback() {
}
req := &firestorepb.RollbackRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
err = c.Rollback(ctx, req)
if err != nil {
@ -216,7 +216,7 @@ func ExampleClient_RunQuery() {
}
req := &firestorepb.RunQueryRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
stream, err := c.RunQuery(ctx, req)
if err != nil {
@ -247,7 +247,7 @@ func ExampleClient_Write() {
}
go func() {
reqs := []*firestorepb.WriteRequest{
// TODO: Create requests.
// TODO: Create requests.
}
for _, req := range reqs {
if err := stream.Send(req); err != nil {
@ -281,7 +281,7 @@ func ExampleClient_Listen() {
}
go func() {
reqs := []*firestorepb.ListenRequest{
// TODO: Create requests.
// TODO: Create requests.
}
for _, req := range reqs {
if err := stream.Send(req); err != nil {
@ -311,7 +311,7 @@ func ExampleClient_ListCollectionIds() {
}
req := &firestorepb.ListCollectionIdsRequest{
// TODO: Fill request struct fields.
// TODO: Fill request struct fields.
}
it := c.ListCollectionIds(ctx, req)
for {

View File

@ -1,10 +1,10 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -293,7 +293,7 @@ func TestFirestoreGetDocument(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedName string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedName string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var request = &firestorepb.GetDocumentRequest{
Name: formattedName,
}
@ -322,7 +322,7 @@ func TestFirestoreGetDocumentError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedName string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedName string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var request = &firestorepb.GetDocumentRequest{
Name: formattedName,
}
@ -355,7 +355,7 @@ func TestFirestoreListDocuments(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var collectionId string = "collectionId-821242276"
var request = &firestorepb.ListDocumentsRequest{
Parent: formattedParent,
@ -396,7 +396,7 @@ func TestFirestoreListDocumentsError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var collectionId string = "collectionId-821242276"
var request = &firestorepb.ListDocumentsRequest{
Parent: formattedParent,
@ -428,7 +428,7 @@ func TestFirestoreCreateDocument(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var collectionId string = "collectionId-821242276"
var documentId string = "documentId506676927"
var document *firestorepb.Document = &firestorepb.Document{}
@ -463,7 +463,7 @@ func TestFirestoreCreateDocumentError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var collectionId string = "collectionId-821242276"
var documentId string = "documentId506676927"
var document *firestorepb.Document = &firestorepb.Document{}
@ -559,7 +559,7 @@ func TestFirestoreDeleteDocument(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedName string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedName string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var request = &firestorepb.DeleteDocumentRequest{
Name: formattedName,
}
@ -585,7 +585,7 @@ func TestFirestoreDeleteDocumentError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedName string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedName string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var request = &firestorepb.DeleteDocumentRequest{
Name: formattedName,
}
@ -618,7 +618,7 @@ func TestFirestoreBatchGetDocuments(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var documents []string = nil
var request = &firestorepb.BatchGetDocumentsRequest{
Database: formattedDatabase,
@ -653,7 +653,7 @@ func TestFirestoreBatchGetDocumentsError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var documents []string = nil
var request = &firestorepb.BatchGetDocumentsRequest{
Database: formattedDatabase,
@ -689,7 +689,7 @@ func TestFirestoreBeginTransaction(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var request = &firestorepb.BeginTransactionRequest{
Database: formattedDatabase,
}
@ -718,7 +718,7 @@ func TestFirestoreBeginTransactionError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var request = &firestorepb.BeginTransactionRequest{
Database: formattedDatabase,
}
@ -745,7 +745,7 @@ func TestFirestoreCommit(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var writes []*firestorepb.Write = nil
var request = &firestorepb.CommitRequest{
Database: formattedDatabase,
@ -776,7 +776,7 @@ func TestFirestoreCommitError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var writes []*firestorepb.Write = nil
var request = &firestorepb.CommitRequest{
Database: formattedDatabase,
@ -805,7 +805,7 @@ func TestFirestoreRollback(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var transaction []byte = []byte("-34")
var request = &firestorepb.RollbackRequest{
Database: formattedDatabase,
@ -833,7 +833,7 @@ func TestFirestoreRollbackError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var transaction []byte = []byte("-34")
var request = &firestorepb.RollbackRequest{
Database: formattedDatabase,
@ -866,7 +866,7 @@ func TestFirestoreRunQuery(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var request = &firestorepb.RunQueryRequest{
Parent: formattedParent,
}
@ -899,7 +899,7 @@ func TestFirestoreRunQueryError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var request = &firestorepb.RunQueryRequest{
Parent: formattedParent,
}
@ -935,7 +935,7 @@ func TestFirestoreWrite(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var request = &firestorepb.WriteRequest{
Database: formattedDatabase,
}
@ -974,7 +974,7 @@ func TestFirestoreWriteError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var request = &firestorepb.WriteRequest{
Database: formattedDatabase,
}
@ -1011,7 +1011,7 @@ func TestFirestoreListen(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var request = &firestorepb.ListenRequest{
Database: formattedDatabase,
}
@ -1050,7 +1050,7 @@ func TestFirestoreListenError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]")
var formattedDatabase string = fmt.Sprintf("projects/%s/databases/%s", "[PROJECT]", "[DATABASE]")
var request = &firestorepb.ListenRequest{
Database: formattedDatabase,
}
@ -1093,7 +1093,7 @@ func TestFirestoreListCollectionIds(t *testing.T) {
mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse)
var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var request = &firestorepb.ListCollectionIdsRequest{
Parent: formattedParent,
}
@ -1132,7 +1132,7 @@ func TestFirestoreListCollectionIdsError(t *testing.T) {
errCode := codes.PermissionDenied
mockFirestore.err = gstatus.Error(errCode, "test error")
var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var formattedParent string = fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]")
var request = &firestorepb.ListCollectionIdsRequest{
Parent: formattedParent,
}

View File

@ -0,0 +1,78 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package firestore
// DatabaseRootPath returns the path for the database root resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/databases/%s", project, database)
// instead.
func DatabaseRootPath(project, database string) string {
	return "projects/" + project + "/databases/" + database
}
// DocumentRootPath returns the path for the document root resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/databases/%s/documents", project, database)
// instead.
func DocumentRootPath(project, database string) string {
	return "projects/" + project + "/databases/" + database + "/documents"
}
// DocumentPathPath returns the path for the document path resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/databases/%s/documents/%s", project, database, documentPath)
// instead.
func DocumentPathPath(project, database, documentPath string) string {
	return "projects/" + project + "/databases/" + database + "/documents/" + documentPath
}
// AnyPathPath returns the path for the any path resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/databases/%s/documents/%s/%s", project, database, document, anyPath)
// instead.
func AnyPathPath(project, database, document, anyPath string) string {
	return "projects/" + project +
		"/databases/" + database +
		"/documents/" + document +
		"/" + anyPath
}

View File

@ -209,7 +209,7 @@ func (c *Client) Batch() *WriteBatch {
}
// commit calls the Commit RPC outside of a transaction.
func (c *Client) commit(ctx context.Context, ws []*pb.Write) (*WriteResult, error) {
func (c *Client) commit(ctx context.Context, ws []*pb.Write) ([]*WriteResult, error) {
if err := checkTransaction(ctx); err != nil {
return nil, err
}
@ -224,7 +224,23 @@ func (c *Client) commit(ctx context.Context, ws []*pb.Write) (*WriteResult, erro
if len(res.WriteResults) == 0 {
return nil, errors.New("firestore: missing WriteResult")
}
return writeResultFromProto(res.WriteResults[0])
var wrs []*WriteResult
for _, pwr := range res.WriteResults {
wr, err := writeResultFromProto(pwr)
if err != nil {
return nil, err
}
wrs = append(wrs, wr)
}
return wrs, nil
}
func (c *Client) commit1(ctx context.Context, ws []*pb.Write) (*WriteResult, error) {
wrs, err := c.commit(ctx, ws)
if err != nil {
return nil, err
}
return wrs[0], nil
}
// A WriteResult is returned by methods that write documents.

View File

@ -17,13 +17,10 @@ package firestore
import (
"testing"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"cloud.google.com/go/internal/pretty"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
"golang.org/x/net/context"
)
var testClient = &Client{
@ -156,10 +153,8 @@ func TestGetAll(t *testing.T) {
t.Fatal(err)
}
}
if !testEqual(got, want) {
got.c = nil
want.c = nil
t.Errorf("#%d: got %+v, want %+v", i, pretty.Value(got), pretty.Value(want))
if diff := testDiff(got, want); diff != "" {
t.Errorf("#%d: got=--, want==++\n%s", i, diff)
}
}
}

View File

@ -17,7 +17,6 @@ package firestore
import (
"math/rand"
"os"
"reflect"
"sync"
"time"
@ -47,15 +46,6 @@ type CollectionRef struct {
Query
}
func (c1 *CollectionRef) equal(c2 *CollectionRef) bool {
return c1.c == c2.c &&
c1.parentPath == c2.parentPath &&
c1.Parent.equal(c2.Parent) &&
c1.Path == c2.Path &&
c1.ID == c2.ID &&
reflect.DeepEqual(c1.Query, c2.Query)
}
func newTopLevelCollRef(c *Client, dbPath, id string) *CollectionRef {
return &CollectionRef{
c: c,

View File

@ -0,0 +1,238 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A runner for the cross-language tests.
package firestore
import (
"encoding/json"
"fmt"
"io/ioutil"
"path"
"path/filepath"
"strings"
"testing"
pb "cloud.google.com/go/firestore/genproto"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
ts "github.com/golang/protobuf/ptypes/timestamp"
"golang.org/x/net/context"
fspb "google.golang.org/genproto/googleapis/firestore/v1beta1"
)
// TestCrossLanguageTests runs every cross-language conformance test found in
// the testdata directory (all files with a .textproto extension) and logs how
// many were executed.
func TestCrossLanguageTests(t *testing.T) {
	const dir = "testdata"
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		t.Fatal(err)
	}
	count := 0
	for _, entry := range entries {
		name := entry.Name()
		if !strings.HasSuffix(name, ".textproto") {
			continue
		}
		runTestFromFile(t, filepath.Join(dir, name))
		count++
	}
	t.Logf("ran %d cross-language tests", count)
}
// runTestFromFile loads one textproto test definition from filename and runs
// it, labeling any failures with the test description and the file's base name.
func runTestFromFile(t *testing.T, filename string) {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		t.Fatalf("%s: %v", filename, err)
	}
	var test pb.Test
	if err := proto.UnmarshalText(string(data), &test); err != nil {
		t.Fatalf("unmarshalling %s: %v", filename, err)
	}
	label := fmt.Sprintf("%s (file %s)", test.Description, filepath.Base(filename))
	runTest(t, label, &test)
}
// runTest executes one cross-language conformance test against a mock server.
// msg identifies the test (description plus source file) in failure output.
// Each test variant primes the mock with the expected request/response pair,
// invokes the corresponding client method, and compares the resulting error
// against the test's IsError expectation.
func runTest(t *testing.T, msg string, test *pb.Test) {
	// check reports a failure when the observed error does not match the
	// test's declared expectation.
	check := func(gotErr error, wantErr bool) {
		if wantErr && gotErr == nil {
			t.Errorf("%s: got nil, want error", msg)
		} else if !wantErr && gotErr != nil {
			t.Errorf("%s: %v", msg, gotErr)
		}
	}
	ctx := context.Background()
	c, srv := newMock(t)
	// Dispatch on the concrete test variant carried by the Test oneof.
	switch tt := test.Test.(type) {
	case *pb.Test_Get:
		// Serve a minimal Document so ref.Get succeeds; only the error
		// path is interesting here.
		srv.addRPC(tt.Get.Request, &fspb.Document{
			CreateTime: &ts.Timestamp{},
			UpdateTime: &ts.Timestamp{},
		})
		ref := docRefFromPath(tt.Get.DocRefPath, c)
		_, err := ref.Get(ctx)
		if err != nil {
			t.Errorf("%s: %v", msg, err)
			return
		}
		// Checking response would just be testing the function converting a Document
		// proto to a DocumentSnapshot, hence uninteresting.
	case *pb.Test_Create:
		srv.addRPC(tt.Create.Request, commitResponseForSet)
		ref := docRefFromPath(tt.Create.DocRefPath, c)
		// Convert the JSON test payload into client-level data (with
		// sentinel values resolved).
		data, err := convertData(tt.Create.JsonData)
		if err != nil {
			t.Errorf("%s: %v", msg, err)
			return
		}
		_, err = ref.Create(ctx, data)
		check(err, tt.Create.IsError)
	case *pb.Test_Set:
		srv.addRPC(tt.Set.Request, commitResponseForSet)
		ref := docRefFromPath(tt.Set.DocRefPath, c)
		data, err := convertData(tt.Set.JsonData)
		if err != nil {
			t.Errorf("%s: %v", msg, err)
			return
		}
		// A Set test may optionally carry a merge option.
		var opts []SetOption
		if tt.Set.Option != nil {
			opts = []SetOption{convertSetOption(tt.Set.Option)}
		}
		_, err = ref.Set(ctx, data, opts...)
		check(err, tt.Set.IsError)
	case *pb.Test_Update:
		// Ignore Update test because we only support UpdatePaths.
		// Not to worry, every Update test has a corresponding UpdatePaths test.
	case *pb.Test_UpdatePaths:
		srv.addRPC(tt.UpdatePaths.Request, commitResponseForSet)
		ref := docRefFromPath(tt.UpdatePaths.DocRefPath, c)
		preconds := convertPrecondition(t, tt.UpdatePaths.Precondition)
		paths := convertFieldPaths(tt.UpdatePaths.FieldPaths)
		// Pair each field path with its JSON-encoded value; the two
		// lists are parallel by construction of the test data.
		var ups []Update
		for i, path := range paths {
			jsonValue := tt.UpdatePaths.JsonValues[i]
			var val interface{}
			if err := json.Unmarshal([]byte(jsonValue), &val); err != nil {
				t.Fatalf("%s: %q: %v", msg, jsonValue, err)
			}
			ups = append(ups, Update{
				FieldPath: path,
				Value:     convertTestValue(val),
			})
		}
		_, err := ref.Update(ctx, ups, preconds...)
		check(err, tt.UpdatePaths.IsError)
	case *pb.Test_Delete:
		srv.addRPC(tt.Delete.Request, commitResponseForSet)
		ref := docRefFromPath(tt.Delete.DocRefPath, c)
		preconds := convertPrecondition(t, tt.Delete.Precondition)
		_, err := ref.Delete(ctx, preconds...)
		check(err, tt.Delete.IsError)
	default:
		t.Fatalf("unknown test type %T", tt)
	}
}
// docRefFromPath builds a DocumentRef for path p attached to client c,
// populating only the fields the conformance tests need.
func docRefFromPath(p string, c *Client) *DocumentRef {
	ref := &DocumentRef{
		Path: p,
		ID:   path.Base(p),
	}
	ref.Parent = &CollectionRef{c: c}
	return ref
}
// convertData decodes jsonData into a map and converts its values via
// convertTestMap (resolving sentinel strings, whole-number floats, and nested
// containers). Returns the JSON error unchanged on malformed input.
func convertData(jsonData string) (map[string]interface{}, error) {
	var fields map[string]interface{}
	err := json.Unmarshal([]byte(jsonData), &fields)
	if err != nil {
		return nil, err
	}
	return convertTestMap(fields), nil
}
// convertTestMap rewrites every value of m in place through convertTestValue
// and returns the same map.
func convertTestMap(m map[string]interface{}) map[string]interface{} {
	for key, val := range m {
		m[key] = convertTestValue(val)
	}
	return m
}
// convertTestValue maps a JSON-decoded test value onto the value the client
// expects: the sentinel strings "ServerTimestamp" and "Delete" become the
// corresponding sentinels, whole-number floats become ints, and slices and
// maps are converted recursively. Anything else passes through unchanged.
func convertTestValue(v interface{}) interface{} {
	switch val := v.(type) {
	case string:
		if val == "ServerTimestamp" {
			return ServerTimestamp
		}
		if val == "Delete" {
			return Delete
		}
		return val
	case float64:
		// JSON numbers decode as float64; restore integral values to int.
		if val == float64(int(val)) {
			return int(val)
		}
		return val
	case []interface{}:
		for idx := range val {
			val[idx] = convertTestValue(val[idx])
		}
		return val
	case map[string]interface{}:
		return convertTestMap(val)
	}
	return v
}
// convertSetOption translates a test SetOption proto into the client's
// SetOption: MergeAll when All is set, otherwise a Merge over the listed
// field paths.
func convertSetOption(opt *pb.SetOption) SetOption {
	if !opt.All {
		return Merge(convertFieldPaths(opt.Fields)...)
	}
	return MergeAll
}
// convertFieldPaths converts test FieldPath protos into the client's
// FieldPath values, preserving order.
//
// Returns nil for empty input (matching the previous append-based behavior);
// otherwise the result slice is pre-sized to avoid repeated growth copies.
func convertFieldPaths(fps []*pb.FieldPath) []FieldPath {
	if len(fps) == 0 {
		return nil
	}
	res := make([]FieldPath, 0, len(fps))
	for _, fp := range fps {
		res = append(res, fp.Field)
	}
	return res
}
// convertPrecondition converts a Firestore Precondition proto into the
// client's precondition list. A nil proto yields no preconditions; an
// unrecognized condition type fails the test.
func convertPrecondition(t *testing.T, fp *fspb.Precondition) []Precondition {
	if fp == nil {
		return nil
	}
	switch cond := fp.ConditionType.(type) {
	case *fspb.Precondition_Exists:
		return []Precondition{exists(cond.Exists)}
	case *fspb.Precondition_UpdateTime:
		tm, err := ptypes.Timestamp(cond.UpdateTime)
		if err != nil {
			t.Fatal(err)
		}
		return []Precondition{LastUpdateTime(tm)}
	default:
		t.Fatalf("unknown precondition type %T", cond)
		return nil // unreachable; Fatalf does not return
	}
}

Some files were not shown because too many files have changed in this diff Show More