updated GHA

Update to v2 SDK
updated dependencies
pull/11/head
Tobias Trabelsi 2 years ago
parent 989e7079a5
commit e1266ebf64

@@ -32,11 +32,10 @@ jobs:
-
name: Import GPG key
id: import_gpg
uses: hashicorp/ghaction-import-gpg@v2.1.0
env:
# These secrets will need to be configured for the repository:
GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }}
PASSPHRASE: ${{ secrets.PASSPHRASE }}
uses: crazy-max/ghaction-import-gpg@v5.0.0
with:
gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }}
passphrase: ${{ secrets.PASSPHRASE }}
-
name: Run GoReleaser
uses: goreleaser/goreleaser-action@v3.0.0

@@ -8,7 +8,7 @@ import (
"net/http"
"code.gitea.io/sdk/gitea"
"github.com/hashicorp/terraform-plugin-sdk/helper/logging"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging"
)
// Config is per-provider, specifies where to connect to gitea
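The config.go hunk only swaps the logging helper import from SDK v1 to v2. For orientation, a client setup that routes Gitea API traffic through that logging transport could look roughly like the sketch below; the Config fields and Client method are assumed shapes for illustration, not the repository's exact code.

package gitea

import (
	"fmt"
	"net/http"

	"code.gitea.io/sdk/gitea"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging"
)

// Config is an assumed shape: where to connect to Gitea and how to authenticate.
type Config struct {
	BaseURL string
	Token   string
}

// Client builds a Gitea API client whose HTTP calls pass through the
// SDK v2 logging transport; only the import path changes versus v1.
func (c *Config) Client() (*gitea.Client, error) {
	httpClient := &http.Client{
		Transport: logging.NewTransport("Gitea", http.DefaultTransport),
	}
	client, err := gitea.NewClient(c.BaseURL,
		gitea.SetHTTPClient(httpClient),
		gitea.SetToken(c.Token),
	)
	if err != nil {
		return nil, fmt.Errorf("creating gitea client: %w", err)
	}
	return client, nil
}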

@@ -6,7 +6,7 @@ import (
"strings"
"code.gitea.io/sdk/gitea"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func dataSourceGiteaOrg() *schema.Resource {

@@ -5,7 +5,7 @@ import (
"strings"
"code.gitea.io/sdk/gitea"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func dataSourceGiteaRepo() *schema.Resource {

@@ -6,7 +6,7 @@ import (
"strings"
"code.gitea.io/sdk/gitea"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func dataSourceGiteaUser() *schema.Resource {

@@ -4,8 +4,8 @@ import (
"fmt"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func TestAccDataSourceGiteaUser_basic(t *testing.T) {

@@ -4,12 +4,11 @@ import (
"fmt"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
// Provider returns a terraform.ResourceProvider.
func Provider() terraform.ResourceProvider {
func Provider() *schema.Provider {
// The actual provider
return &schema.Provider{
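The signature change here, from terraform.ResourceProvider to *schema.Provider, is the heart of the v2 migration in provider.go. A minimal sketch of the v2-style skeleton follows; the registration keys are illustrative guesses, while the dataSourceGitea* constructors are the ones touched elsewhere in this diff.

package gitea

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Provider returns a v2 *schema.Provider; under SDK v1 this function
// returned a terraform.ResourceProvider interface value instead.
func Provider() *schema.Provider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			// provider arguments (e.g. base URL, token) go here
		},
		DataSourcesMap: map[string]*schema.Resource{
			// keys are assumptions; the constructors appear in this diff
			"gitea_org":  dataSourceGiteaOrg(),
			"gitea_repo": dataSourceGiteaRepo(),
			"gitea_user": dataSourceGiteaUser(),
		},
		ResourcesMap: map[string]*schema.Resource{
			// resource registrations (see the resource_gitea_* hunks)
		},
	}
}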

@@ -4,8 +4,8 @@ import (
"os"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
var testAccProviders map[string]terraform.ResourceProvider
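The hunk above shows the old map[string]terraform.ResourceProvider declaration for the acceptance-test providers. With SDK v2 this wiring commonly moves to provider factories; a hedged sketch of that pattern (not necessarily this repository's final test code):

package gitea

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// SDK v2 acceptance tests typically pass one factory per provider name
// instead of a map of terraform.ResourceProvider values.
var testAccProviderFactories = map[string]func() (*schema.Provider, error){
	"gitea": func() (*schema.Provider, error) {
		return Provider(), nil
	},
}

func TestProvider(t *testing.T) {
	// InternalValidate catches schema definition mistakes early.
	if err := Provider().InternalValidate(); err != nil {
		t.Fatalf("err: %s", err)
	}
}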

@@ -4,7 +4,7 @@ import (
"fmt"
"code.gitea.io/sdk/gitea"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (

@@ -4,7 +4,7 @@ import (
"fmt"
"code.gitea.io/sdk/gitea"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (

@@ -5,7 +5,7 @@ import (
"strconv"
"code.gitea.io/sdk/gitea"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (

@@ -5,7 +5,7 @@ import (
"strconv"
"code.gitea.io/sdk/gitea"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (

@@ -6,7 +6,7 @@ import (
"strings"
"code.gitea.io/sdk/gitea"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (

@@ -5,7 +5,7 @@ import (
"strconv"
"code.gitea.io/sdk/gitea"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (

@@ -5,79 +5,64 @@ go 1.18
require (
code.gitea.io/sdk/gitea v0.15.1
github.com/hashicorp/terraform-plugin-docs v0.7.0
github.com/hashicorp/terraform-plugin-sdk v1.13.0
github.com/hashicorp/terraform-plugin-sdk/v2 v2.20.0
)
require (
cloud.google.com/go v0.45.1 // indirect
github.com/Masterminds/goutils v1.1.0 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
github.com/agext/levenshtein v1.2.2 // indirect
github.com/apparentlymart/go-cidr v1.0.1 // indirect
github.com/apparentlymart/go-textseg v1.0.0 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/armon/go-radix v1.0.0 // indirect
github.com/aws/aws-sdk-go v1.25.3 // indirect
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 // indirect
github.com/bgentry/speakeasy v0.1.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fatih/color v1.7.0 // indirect
github.com/golang/protobuf v1.3.4 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-checkpoint v0.5.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-getter v1.4.0 // indirect
github.com/hashicorp/go-hclog v0.9.2 // indirect
github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect
github.com/hashicorp/go-hclog v1.2.1 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-plugin v1.0.1 // indirect
github.com/hashicorp/go-safetemp v1.0.0 // indirect
github.com/hashicorp/go-uuid v1.0.1 // indirect
github.com/hashicorp/go-version v1.4.0 // indirect
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/hashicorp/hc-install v0.3.1 // indirect
github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f // indirect
github.com/hashicorp/hcl/v2 v2.0.0 // indirect
github.com/hashicorp/go-plugin v1.4.4 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hc-install v0.4.0 // indirect
github.com/hashicorp/hcl/v2 v2.13.0 // indirect
github.com/hashicorp/logutils v1.0.0 // indirect
github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8 // indirect
github.com/hashicorp/terraform-exec v0.16.0 // indirect
github.com/hashicorp/terraform-json v0.13.0 // indirect
github.com/hashicorp/terraform-plugin-test v1.3.0 // indirect
github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 // indirect
github.com/hashicorp/terraform-exec v0.17.2 // indirect
github.com/hashicorp/terraform-json v0.14.0 // indirect
github.com/hashicorp/terraform-plugin-go v0.12.0 // indirect
github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect
github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect
github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mitchellh/cli v1.1.2 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/oklog/run v1.0.0 // indirect
github.com/posener/complete v1.2.1 // indirect
github.com/posener/complete v1.1.1 // indirect
github.com/russross/blackfriday v1.6.0 // indirect
github.com/spf13/afero v1.2.2 // indirect
github.com/ulikunitz/xz v0.5.5 // indirect
github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect
github.com/vmihailenco/tagparser v0.1.1 // indirect
github.com/zclconf/go-cty v1.10.0 // indirect
github.com/zclconf/go-cty-yaml v1.0.1 // indirect
go.opencensus.io v0.22.0 // indirect
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect
golang.org/x/net v0.0.0-20210326060303-6b1517762897 // indirect
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6 // indirect
golang.org/x/text v0.3.5 // indirect
google.golang.org/api v0.9.0 // indirect
google.golang.org/appengine v1.6.5 // indirect
google.golang.org/genproto v0.0.0-20200310143817-43be25429f5a // indirect
google.golang.org/grpc v1.27.1 // indirect
golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 // indirect
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect
golang.org/x/text v0.3.7 // indirect
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/genproto v0.0.0-20200711021454-869866162049 // indirect
google.golang.org/grpc v1.48.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
)

go.sum

@@ -1,17 +1,9 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
code.gitea.io/gitea-vet v0.2.1/go.mod h1:zcNbT/aJEmivCAhfmkHOlT645KNOf9W2KnkLgFjGGfE=
code.gitea.io/sdk/gitea v0.15.1 h1:WJreC7YYuxbn0UDaPuWIe/mtiNKTvLN8MLkaw71yx/M=
code.gitea.io/sdk/gitea v0.15.1/go.mod h1:klY2LVI3s3NChzIk/MzMn7G1FHrfU7qd63iSMVoHRBA=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
@@ -25,46 +17,46 @@ github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C6
github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE=
github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0=
github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U=
github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I=
github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=
github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
github.com/aws/aws-sdk-go v1.25.3 h1:uM16hIw9BotjZKMZlX05SN2EFtaWfi/NonPKIARiBLQ=
github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
@@ -75,39 +67,37 @@ github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6
github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=
@@ -116,54 +106,45 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-getter v1.4.0 h1:ENHNi8494porjD0ZhIrjlAHnveSFhY7hvOJrV/fsKkw=
github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY=
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI=
github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs=
github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw=
github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE=
github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
github.com/hashicorp/go-plugin v1.4.4 h1:NVdrSdFRt3SkZtNckJ6tog7gbpRrcbOjQi/rgF7JYWQ=
github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4=
github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hc-install v0.3.1 h1:VIjllE6KyAI1A244G8kTaHXy+TL5/XYzvrtFi8po/Yk=
github.com/hashicorp/hc-install v0.3.1/go.mod h1:3LCdWcCDS1gaHC9mhHCGbkYfoY6vdsKohGjugbZdZak=
github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws=
github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
github.com/hashicorp/hcl/v2 v2.0.0 h1:efQznTz+ydmQXq3BOnRa3AXzvCeTq1P4dKj/z5GLlY8=
github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90=
github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk=
github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI=
github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc=
github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8 h1:+RyjwU+Gnd/aTJBPZVDNm903eXVjjqhbaR4Ypx3xYyY=
github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A=
github.com/hashicorp/terraform-exec v0.16.0 h1:XUh9pJPcbfZsuhReVvmRarQTaiiCnYogFCCjOvEYuug=
github.com/hashicorp/terraform-exec v0.16.0/go.mod h1:wB5JHmjxZ/YVNZuv9npAXKmz5pGyxy8PSi0GRR0+YjA=
github.com/hashicorp/terraform-json v0.4.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU=
github.com/hashicorp/terraform-json v0.13.0 h1:Li9L+lKD1FO5RVFRM1mMMIBDoUHslOniyEi5CM+FWGY=
github.com/hashicorp/terraform-json v0.13.0/go.mod h1:y5OdLBCT+rxbwnpxZs9kGL7R9ExU76+cpdY8zHwoazk=
github.com/hashicorp/terraform-exec v0.17.2 h1:EU7i3Fh7vDUI9nNRdMATCEfnm9axzTnad8zszYZ73Go=
github.com/hashicorp/terraform-exec v0.17.2/go.mod h1:tuIbsL2l4MlwwIZx9HPM+LOV9vVyEfBYu2GsO1uH3/8=
github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s=
github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM=
github.com/hashicorp/terraform-plugin-docs v0.7.0 h1:7XKAOYHAxghe7q4/vx468X43X9GikdQ2dxtmcu2gQv0=
github.com/hashicorp/terraform-plugin-docs v0.7.0/go.mod h1:57CICKfW7/KbW4lPhKOledyT6vu1LeAOzuvWXsVaxUE=
github.com/hashicorp/terraform-plugin-sdk v1.13.0 h1:8v2/ZNiI12OHxEn8pzJ3noCHyRc0biKbKj+iFv5ZWKw=
github.com/hashicorp/terraform-plugin-sdk v1.13.0/go.mod h1:HiWIPD/T9HixIhQUwaSoDQxo4BLFdmiBi/Qz5gjB8Q0=
github.com/hashicorp/terraform-plugin-test v1.3.0 h1:hU5LoxrOn9qvOo+LTKN6mSav2J+dAMprbdxJPEQvp4U=
github.com/hashicorp/terraform-plugin-test v1.3.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs=
github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg=
github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/terraform-plugin-go v0.12.0 h1:6wW9mT1dSs0Xq4LR6HXj1heQ5ovr5GxXNJwkErZzpJw=
github.com/hashicorp/terraform-plugin-go v0.12.0/go.mod h1:kwhmaWHNDvT1B3QiSJdAtrB/D4RaKSY/v3r2BuoWK4M=
github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs=
github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4=
github.com/hashicorp/terraform-plugin-sdk/v2 v2.20.0 h1:+KxZULPsbjpAVoP0WNj/8aVW6EqpcX5JcUcQ5wl7Da4=
github.com/hashicorp/terraform-plugin-sdk/v2 v2.20.0/go.mod h1:DwGJG3KNxIPluVk6hexvDfYR/MS/eKGpiztJoT3Bbbw=
github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg=
github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI=
github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0=
github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
@@ -174,13 +155,9 @@ github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck=
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
@@ -191,77 +168,62 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw=
github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758=
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI=
github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=
github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U=
github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY=
@@ -269,39 +231,26 @@ github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgq
github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI=
github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=
github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=
github.com/zclconf/go-cty v1.9.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0=
github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8=
github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8=
github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 h1:O8uGbHCqlTp2P6QJSLmCojM4mN6UemYv8K+dCnmHmu0=
golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -310,120 +259,114 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210326060303-6b1517762897 h1:KrsHThm5nFk34YtATK1LsThyGhGbGe1olrte/HInHvs=
golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6 h1:foEbQz/B0Oz6YIqu/69kfXPYeFQAuuMYFkjaqXzl5Wo=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 h1:nonptSpoQ4vQjyraW20DXPAglgQfVnM9ZC6MmNLMR60=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200310143817-43be25429f5a h1:lRlI5zu6AFy3iU/F8YWyNrAmn/tPCnhiTxfwhWb76eU=
google.golang.org/genproto v0.0.0-20200310143817-43be25429f5a/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200711021454-869866162049 h1:YFTFpQhgvrLrmxtiIncJxFXeCyq84ixuKWVCaCAi9Oc=
google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=

@ -2,7 +2,7 @@ package main // import "src.techknowlogick.com/terraform-provider-gitea"
import (
"git.uploadfilter24.eu/terraform-provider-gitea/gitea"
"github.com/hashicorp/terraform-plugin-sdk/plugin"
"github.com/hashicorp/terraform-plugin-sdk/v2/plugin"
)
var Version = "development"
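For readers following the v2 migration, the remainder of main.go (not part of this hunk) typically wires the provider into Terraform via plugin.Serve. A minimal sketch, assuming the standard v2 entry point and no extra serve options:

```go
package main // import "src.techknowlogick.com/terraform-provider-gitea"

import (
	"git.uploadfilter24.eu/terraform-provider-gitea/gitea"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/plugin"
)

var Version = "development"

func main() {
	// In the v2 SDK, ProviderFunc must return a *schema.Provider.
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: func() *schema.Provider {
			return gitea.Provider()
		},
	})
}
```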

202
vendor/cloud.google.com/go/LICENSE generated vendored

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -1,526 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
userAgent = "gcloud-golang/0.1"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var (
defaultClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
}}
subscribeClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
}}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
func (c *cachedValue) get(cl *Client) (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = cl.getTrimmed(c.k)
} else {
v, err = cl.Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var (
onGCEOnce sync.Once
onGCE bool
)
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
}
func initOnGCE() {
onGCE = testOnGCE()
}
func testOnGCE() bool {
// The user explicitly said they're on GCE, so trust them.
if os.Getenv(metadataHostEnv) != "" {
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/googleapis/google-cloud-go/issues/194
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := defaultClient.hc.Do(req.WithContext(ctx))
if err != nil {
resc <- false
return
}
defer res.Body.Close()
resc <- res.Header.Get("Metadata-Flavor") == "Google"
}()
go func() {
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
}
resc <- strsContains(addrs, metadataIP)
}()
tryHarder := systemInfoSuggestsGCE()
if tryHarder {
res := <-resc
if res {
// The first strategy succeeded, so let's use it.
return true
}
// Wait for either the DNS or metadata server probe to
// contradict the other one and say we are running on
// GCE. Give it a lot of time to do so, since the system
// info already suggests we're running on a GCE BIOS.
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case res = <-resc:
return res
case <-timer.C:
// Too slow. Who knows what this system is.
return false
}
}
// There's no hint from the system info that we're running on
// GCE, so use the first probe's result as truth, whether it's
// true or false. The goal here is to optimize for speed for
// users who are NOT running on GCE. We can't assume that
// either a DNS lookup or an HTTP request to a blackholed IP
// address is fast. Worst case this should return when the
// defaultClient's Transport.ResponseHeaderTimeout or
// Transport.Dial.Timeout fires (in two seconds).
return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return subscribeClient.Subscribe(suffix, fn)
}
// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
// Email calls Client.Email on the default client.
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }
// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
return defaultClient.InstanceAttributeValue(attr)
}
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
return defaultClient.ProjectAttributeValue(attr)
}
// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// A Client provides metadata.
type Client struct {
hc *http.Client
}
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
return &Client{hc: c}
}
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
u := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", u, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := c.hc.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
if res.StatusCode != 200 {
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
}
return string(all), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
val, _, err := c.getETag(suffix)
return val, err
}
func (c *Client) getTrimmed(suffix string) (s string, err error) {
s, err = c.Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
j, err := c.Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
}
// Email returns the email address associated with the service account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Email(serviceAccount string) (string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
return c.getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
j, err := c.Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
host, err := c.Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
zone, err := c.getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
return c.Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
return c.Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := c.getETag(suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
return err
}
ok := true
if strings.ContainsRune(suffix, '?') {
suffix += "&wait_for_change=true&last_etag="
} else {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
continue // Retry on other errors.
}
ok = false
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
return err
}
}
}
// Error contains an error response from the server.
type Error struct {
// Code is the HTTP response status code.
Code int
// Message is the server response message.
Message string
}
func (e *Error) Error() string {
return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
}
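The package comment above describes a thin wrapper around the GCE metadata service. As a hedged illustration of how the deleted helpers were typically consumed (the calling program below is hypothetical, not part of this repository):

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE probes both the metadata IP and metadata.google.internal
	// before deciding, as implemented in testOnGCE above.
	if !metadata.OnGCE() {
		log.Fatal("not running on Google Compute Engine")
	}
	// The package-level helpers delegate to the default client.
	project, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("project %s, zone %s\n", project, zone)
}
```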

@ -1,315 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package iam supports the resource-specific operations of Google Cloud
// IAM (Identity and Access Management) for the Google Cloud Libraries.
// See https://cloud.google.com/iam for more about IAM.
//
// Users of the Google Cloud Libraries will typically not use this package
// directly. Instead they will begin with some resource that supports IAM, like
// a pubsub topic, and call its IAM method to get a Handle for that resource.
package iam
import (
"context"
"fmt"
"time"
gax "github.com/googleapis/gax-go/v2"
pb "google.golang.org/genproto/googleapis/iam/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// client abstracts the IAMPolicy API to allow multiple implementations.
type client interface {
Get(ctx context.Context, resource string) (*pb.Policy, error)
Set(ctx context.Context, resource string, p *pb.Policy) error
Test(ctx context.Context, resource string, perms []string) ([]string, error)
}
// grpcClient implements client for the standard gRPC-based IAMPolicy service.
type grpcClient struct {
c pb.IAMPolicyClient
}
var withRetry = gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60 * time.Second,
Multiplier: 1.3,
})
})
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
var proto *pb.Policy
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
ctx = insertMetadata(ctx, md)
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
var err error
proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
return err
}, withRetry)
if err != nil {
return nil, err
}
return proto, nil
}
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
ctx = insertMetadata(ctx, md)
return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
Resource: resource,
Policy: p,
})
return err
}, withRetry)
}
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
var res *pb.TestIamPermissionsResponse
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
ctx = insertMetadata(ctx, md)
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
var err error
res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
Resource: resource,
Permissions: perms,
})
return err
}, withRetry)
if err != nil {
return nil, err
}
return res.Permissions, nil
}
// A Handle provides IAM operations for a resource.
type Handle struct {
c client
resource string
}
// InternalNewHandle is for use by the Google Cloud Libraries only.
//
// InternalNewHandle returns a Handle for resource.
// The conn parameter refers to a server that must support the IAMPolicy service.
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
}
// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleGRPCClient returns a Handle for resource using the given
// gRPC service that implements IAM as a mixin.
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
return InternalNewHandleClient(&grpcClient{c: c}, resource)
}
// InternalNewHandleClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleClient returns a Handle for resource using the given
// client implementation.
func InternalNewHandleClient(c client, resource string) *Handle {
return &Handle{
c: c,
resource: resource,
}
}
// Policy retrieves the IAM policy for the resource.
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
proto, err := h.c.Get(ctx, h.resource)
if err != nil {
return nil, err
}
return &Policy{InternalProto: proto}, nil
}
// SetPolicy replaces the resource's current policy with the supplied Policy.
//
// If policy was created from a prior call to Get, then the modification will
// only succeed if the policy has not changed since the Get.
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
return h.c.Set(ctx, h.resource, policy.InternalProto)
}
// TestPermissions returns the subset of permissions that the caller has on the resource.
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
return h.c.Test(ctx, h.resource, permissions)
}
// A RoleName is a name representing a collection of permissions.
type RoleName string
// Common role names.
const (
Owner RoleName = "roles/owner"
Editor RoleName = "roles/editor"
Viewer RoleName = "roles/viewer"
)
const (
// AllUsers is a special member that denotes all users, even unauthenticated ones.
AllUsers = "allUsers"
// AllAuthenticatedUsers is a special member that denotes all authenticated users.
AllAuthenticatedUsers = "allAuthenticatedUsers"
)
// A Policy is a list of Bindings representing roles
// granted to members.
//
// The zero Policy is a valid policy with no bindings.
type Policy struct {
// TODO(jba): when type aliases are available, put Policy into an internal package
// and provide an exported alias here.
// This field is exported for use by the Google Cloud Libraries only.
// It may become unexported in a future release.
InternalProto *pb.Policy
}
// Members returns the list of members with the supplied role.
// The return value should not be modified. Use Add and Remove
// to modify the members of a role.
func (p *Policy) Members(r RoleName) []string {
b := p.binding(r)
if b == nil {
return nil
}
return b.Members
}
// HasRole reports whether member has role r.
func (p *Policy) HasRole(member string, r RoleName) bool {
return memberIndex(member, p.binding(r)) >= 0
}
// Add adds member member to role r if it is not already present.
// A new binding is created if there is no binding for the role.
func (p *Policy) Add(member string, r RoleName) {
b := p.binding(r)
if b == nil {
if p.InternalProto == nil {
p.InternalProto = &pb.Policy{}
}
p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
Role: string(r),
Members: []string{member},
})
return
}
if memberIndex(member, b) < 0 {
b.Members = append(b.Members, member)
return
}
}
// Remove removes member from role r if it is present.
func (p *Policy) Remove(member string, r RoleName) {
bi := p.bindingIndex(r)
if bi < 0 {
return
}
bindings := p.InternalProto.Bindings
b := bindings[bi]
mi := memberIndex(member, b)
if mi < 0 {
return
}
// Order doesn't matter for bindings or members, so to remove, move the last item
// into the removed spot and shrink the slice.
if len(b.Members) == 1 {
// Remove binding.
last := len(bindings) - 1
bindings[bi] = bindings[last]
bindings[last] = nil
p.InternalProto.Bindings = bindings[:last]
return
}
// Remove member.
// TODO(jba): worry about multiple copies of m?
last := len(b.Members) - 1
b.Members[mi] = b.Members[last]
b.Members[last] = ""
b.Members = b.Members[:last]
}
// Roles returns the names of all the roles that appear in the Policy.
func (p *Policy) Roles() []RoleName {
if p.InternalProto == nil {
return nil
}
var rns []RoleName
for _, b := range p.InternalProto.Bindings {
rns = append(rns, RoleName(b.Role))
}
return rns
}
// binding returns the Binding for the supplied role, or nil if there isn't one.
func (p *Policy) binding(r RoleName) *pb.Binding {
i := p.bindingIndex(r)
if i < 0 {
return nil
}
return p.InternalProto.Bindings[i]
}
func (p *Policy) bindingIndex(r RoleName) int {
if p.InternalProto == nil {
return -1
}
for i, b := range p.InternalProto.Bindings {
if b.Role == string(r) {
return i
}
}
return -1
}
// memberIndex returns the index of m in b's Members, or -1 if not found.
func memberIndex(m string, b *pb.Binding) int {
if b == nil {
return -1
}
for i, mm := range b.Members {
if mm == m {
return i
}
}
return -1
}
// insertMetadata inserts metadata into the given context
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
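The package comment above notes that callers normally reach this API through a resource's IAM method. A short, hedged sketch of the read-modify-write pattern built on the Handle and Policy types shown here (how the Handle is obtained is assumed):

```go
package example

import (
	"context"

	"cloud.google.com/go/iam"
)

// grantViewer adds member to roles/viewer. The *iam.Handle would normally
// come from a resource's IAM() method (for example a Pub/Sub topic).
func grantViewer(ctx context.Context, h *iam.Handle, member string) error {
	policy, err := h.Policy(ctx)
	if err != nil {
		return err
	}
	policy.Add(member, iam.Viewer)
	// SetPolicy only succeeds if the policy has not changed since Policy().
	return h.SetPolicy(ctx, policy)
}
```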

@ -1,54 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"fmt"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/status"
)
// Annotate prepends msg to the error message in err, attempting
// to preserve other information in err, like an error code.
//
// Annotate panics if err is nil.
//
// Annotate knows about these error types:
// - "google.golang.org/grpc/status".Status
// - "google.golang.org/api/googleapi".Error
// If the error is not one of these types, Annotate behaves
// like
// fmt.Errorf("%s: %v", msg, err)
func Annotate(err error, msg string) error {
if err == nil {
panic("Annotate called with nil")
}
if s, ok := status.FromError(err); ok {
p := s.Proto()
p.Message = msg + ": " + p.Message
return status.ErrorProto(p)
}
if g, ok := err.(*googleapi.Error); ok {
g.Message = msg + ": " + g.Message
return g
}
return fmt.Errorf("%s: %v", msg, err)
}
// Annotatef uses format and args to format a string, then calls Annotate.
func Annotatef(err error, format string, args ...interface{}) error {
return Annotate(err, fmt.Sprintf(format, args...))
}
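As a hedged illustration only: a caller of Annotatef, written as if it lived in this same internal package (internal/ cannot be imported from outside cloud.google.com/go); doRequest is a placeholder.

```go
package internal

import "context"

// doRequest stands in for a real RPC call in this sketch.
func doRequest(ctx context.Context, name string) error { return nil }

// fetchObject prepends context to any failure; Annotate/Annotatef preserve
// gRPC status codes and googleapi error codes while doing so.
func fetchObject(ctx context.Context, name string) error {
	if err := doRequest(ctx, name); err != nil {
		return Annotatef(err, "fetching object %q", name)
	}
	return nil
}
```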

@ -1,108 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package optional provides versions of primitive types that can
// be nil. These are useful in methods that update some of an API object's
// fields.
package optional
import (
"fmt"
"strings"
"time"
)
type (
// Bool is either a bool or nil.
Bool interface{}
// String is either a string or nil.
String interface{}
// Int is either an int or nil.
Int interface{}
// Uint is either a uint or nil.
Uint interface{}
// Float64 is either a float64 or nil.
Float64 interface{}
// Duration is either a time.Duration or nil.
Duration interface{}
)
// ToBool returns its argument as a bool.
// It panics if its argument is nil or not a bool.
func ToBool(v Bool) bool {
x, ok := v.(bool)
if !ok {
doPanic("Bool", v)
}
return x
}
// ToString returns its argument as a string.
// It panics if its argument is nil or not a string.
func ToString(v String) string {
x, ok := v.(string)
if !ok {
doPanic("String", v)
}
return x
}
// ToInt returns its argument as an int.
// It panics if its argument is nil or not an int.
func ToInt(v Int) int {
x, ok := v.(int)
if !ok {
doPanic("Int", v)
}
return x
}
// ToUint returns its argument as a uint.
// It panics if its argument is nil or not a uint.
func ToUint(v Uint) uint {
x, ok := v.(uint)
if !ok {
doPanic("Uint", v)
}
return x
}
// ToFloat64 returns its argument as a float64.
// It panics if its argument is nil or not a float64.
func ToFloat64(v Float64) float64 {
x, ok := v.(float64)
if !ok {
doPanic("Float64", v)
}
return x
}
// ToDuration returns its argument as a time.Duration.
// It panics if its argument is nil or not a time.Duration.
func ToDuration(v Duration) time.Duration {
x, ok := v.(time.Duration)
if !ok {
doPanic("Duration", v)
}
return x
}
func doPanic(capType string, v interface{}) {
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
}
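A small sketch of the "update only some fields" pattern the package comment describes. Since internal/optional cannot be imported from outside cloud.google.com/go, the code is written as a hypothetical sibling package, and the struct and field names are invented:

```go
package example // hypothetical package inside cloud.google.com/go

import "cloud.google.com/go/internal/optional"

// bucketAttrsToUpdate illustrates partial updates: a nil field means
// "leave the current value unchanged".
type bucketAttrsToUpdate struct {
	VersioningEnabled optional.Bool
}

// versioning reports the requested value and whether it was set at all.
func versioning(u bucketAttrsToUpdate) (value, set bool) {
	if u.VersioningEnabled == nil {
		return false, false
	}
	// ToBool panics on non-bool values, so call it only after the nil check.
	return optional.ToBool(u.VersioningEnabled), true
}
```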

@ -1,54 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"context"
"time"
gax "github.com/googleapis/gax-go/v2"
)
// Retry calls the supplied function f repeatedly according to the provided
// backoff parameters. It returns when one of the following occurs:
// When f's first return value is true, Retry immediately returns with f's second
// return value.
// When the provided context is done, Retry returns with an error that
// includes both ctx.Err() and the last error returned by f.
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
return retry(ctx, bo, f, gax.Sleep)
}
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
sleep func(context.Context, time.Duration) error) error {
var lastErr error
for {
stop, err := f()
if stop {
return err
}
// Remember the last "real" error from f.
if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
lastErr = err
}
p := bo.Pause()
if cerr := sleep(ctx, p); cerr != nil {
if lastErr != nil {
return Annotatef(lastErr, "retry failed with %v; last error", cerr)
}
return cerr
}
}
}
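And a hedged usage sketch for Retry, again written as if inside this internal package; callOnce and the backoff values are placeholders:

```go
package internal

import (
	"context"
	"time"

	gax "github.com/googleapis/gax-go/v2"
)

// callOnce stands in for a single attempt at a real API call.
func callOnce(ctx context.Context) error { return nil }

// callWithRetry retries callOnce with exponential backoff until it succeeds
// or ctx is done, matching the contract documented above.
func callWithRetry(ctx context.Context) error {
	bo := gax.Backoff{
		Initial:    100 * time.Millisecond,
		Max:        30 * time.Second,
		Multiplier: 1.3,
	}
	return Retry(ctx, bo, func() (stop bool, err error) {
		if err := callOnce(ctx); err != nil {
			return false, err // keep retrying; Retry remembers the last real error
		}
		return true, nil // stop on success
	})
}
```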

@ -1,109 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"context"
"fmt"
"go.opencensus.io/trace"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
// StartSpan adds a span to the trace with the given name.
func StartSpan(ctx context.Context, name string) context.Context {
ctx, _ = trace.StartSpan(ctx, name)
return ctx
}
// EndSpan ends a span with the given error.
func EndSpan(ctx context.Context, err error) {
span := trace.FromContext(ctx)
if err != nil {
span.SetStatus(toStatus(err))
}
span.End()
}
// toStatus interrogates an error and converts it to an appropriate
// OpenCensus status.
func toStatus(err error) trace.Status {
if err2, ok := err.(*googleapi.Error); ok {
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
} else if s, ok := status.FromError(err); ok {
return trace.Status{Code: int32(s.Code()), Message: s.Message()}
} else {
return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
}
}
// TODO(deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
switch httpStatusCode {
case 200:
return int32(code.Code_OK)
case 499:
return int32(code.Code_CANCELLED)
case 500:
return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
case 400:
return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
case 504:
return int32(code.Code_DEADLINE_EXCEEDED)
case 404:
return int32(code.Code_NOT_FOUND)
case 409:
return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
case 403:
return int32(code.Code_PERMISSION_DENIED)
case 401:
return int32(code.Code_UNAUTHENTICATED)
case 429:
return int32(code.Code_RESOURCE_EXHAUSTED)
case 501:
return int32(code.Code_UNIMPLEMENTED)
case 503:
return int32(code.Code_UNAVAILABLE)
default:
return int32(code.Code_UNKNOWN)
}
}
// TODO: (odeke-em): perhaps just pass around spans due to the cost
// incurred from using trace.FromContext(ctx) yet we could avoid
// throwing away the work done by ctx, span := trace.StartSpan.
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
var attrs []trace.Attribute
for k, v := range attrMap {
var a trace.Attribute
switch v := v.(type) {
case string:
a = trace.StringAttribute(k, v)
case bool:
a = trace.BoolAttribute(k, v)
case int:
a = trace.Int64Attribute(k, int64(v))
case int64:
a = trace.Int64Attribute(k, v)
default:
a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
}
attrs = append(attrs, a)
}
trace.FromContext(ctx).Annotatef(attrs, format, args...)
}

@ -1,19 +0,0 @@
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
today=$(date +%Y%m%d)
sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE

@ -1,71 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate ./update_version.sh
// Package version contains version information for Google Cloud Client
// Libraries for Go, as reported in request headers.
package version
import (
"runtime"
"strings"
"unicode"
)
// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20190802"
// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
return goVersion
}
var goVersion = goVer(runtime.Version())
const develPrefix = "devel +"
func goVer(s string) string {
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return ""
}
func notSemverRune(r rune) bool {
return !strings.ContainsRune("0123456789.", r)
}

@ -1,32 +0,0 @@
## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)
- [About Cloud Storage](https://cloud.google.com/storage/)
- [API documentation](https://cloud.google.com/storage/docs)
- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)
### Example Usage
First create a `storage.Client` to use throughout your application:
[snip]:# (storage-1)
```go
client, err := storage.NewClient(ctx)
if err != nil {
log.Fatal(err)
}
```
[snip]:# (storage-2)
```go
// Read object1 from the bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
if err != nil {
log.Fatal(err)
}
defer rc.Close()
body, err := ioutil.ReadAll(rc)
if err != nil {
log.Fatal(err)
}
```

@ -1,335 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"net/http"
"reflect"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
)
// ACLRole is the level of access to grant.
type ACLRole string
const (
RoleOwner ACLRole = "OWNER"
RoleReader ACLRole = "READER"
RoleWriter ACLRole = "WRITER"
)
// ACLEntity refers to a user or group.
// They are sometimes referred to as grantees.
//
// It could be in the form of:
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
// "domain-<domain>" and "project-team-<projectId>".
//
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
type ACLEntity string
const (
AllUsers ACLEntity = "allUsers"
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)
// ACLRule represents a grant for a role to an entity (user, group or team) for a
// Google Cloud Storage object or bucket.
type ACLRule struct {
Entity ACLEntity
EntityID string
Role ACLRole
Domain string
Email string
ProjectTeam *ProjectTeam
}
// ProjectTeam is the project team associated with the entity, if any.
type ProjectTeam struct {
ProjectNumber string
Team string
}
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
type ACLHandle struct {
c *Client
bucket string
object string
isDefault bool
userProject string // for requester-pays buckets
}
// Delete permanently deletes the ACL entry for the given entity.
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectDelete(ctx, entity)
}
if a.isDefault {
return a.bucketDefaultDelete(ctx, entity)
}
return a.bucketDelete(ctx, entity)
}
// Set sets the role for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectSet(ctx, entity, role, false)
}
if a.isDefault {
return a.objectSet(ctx, entity, role, true)
}
return a.bucketSet(ctx, entity, role)
}
// List retrieves ACL entries.
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectList(ctx)
}
if a.isDefault {
return a.bucketDefaultList(ctx)
}
return a.bucketList(ctx)
}
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls
var err error
err = runWithRetry(ctx, func() error {
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
})
if err != nil {
return nil, err
}
return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
return runWithRetry(ctx, func() error {
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
a.configureCall(ctx, req)
return req.Do()
})
}
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.BucketAccessControls
var err error
err = runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.List(a.bucket)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
})
if err != nil {
return nil, err
}
return toBucketACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
acl := &raw.BucketAccessControl{
Bucket: a.bucket,
Entity: string(entity),
Role: string(role),
}
err := runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
a.configureCall(ctx, req)
_, err := req.Do()
return err
})
if err != nil {
return err
}
return nil
}
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
return runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
a.configureCall(ctx, req)
return req.Do()
})
}
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls
var err error
err = runWithRetry(ctx, func() error {
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
})
if err != nil {
return nil, err
}
return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
type setRequest interface {
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
Header() http.Header
}
acl := &raw.ObjectAccessControl{
Bucket: a.bucket,
Entity: string(entity),
Role: string(role),
}
var req setRequest
if isBucketDefault {
req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
} else {
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
}
a.configureCall(ctx, req)
return runWithRetry(ctx, func() error {
_, err := req.Do()
return err
})
}
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
return runWithRetry(ctx, func() error {
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
a.configureCall(ctx, req)
return req.Do()
})
}
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
vc := reflect.ValueOf(call)
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
if a.userProject != "" {
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
}
setClientHeader(call.Header())
}
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
rs = append(rs, toObjectACLRule(item))
}
return rs
}
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
rs = append(rs, toBucketACLRule(item))
}
return rs
}
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toObjectProjectTeam(a.ProjectTeam),
}
}
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toBucketProjectTeam(a.ProjectTeam),
}
}
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.ObjectAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
}
return r
}
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.BucketAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
}
return r
}
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
return &raw.BucketAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
return &raw.ObjectAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}

File diff suppressed because it is too large

@ -1,228 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"cloud.google.com/go/internal/trace"
raw "google.golang.org/api/storage/v1"
)
// CopierFrom creates a Copier that can copy src to dst.
// You can immediately call Run on the returned Copier, or
// you can configure it first.
//
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
// in which case the user project of src is billed.
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
return &Copier{dst: dst, src: src}
}
// A Copier copies a source object to a destination.
type Copier struct {
// ObjectAttrs are optional attributes to set on the destination object.
// Any attributes must be initialized before any calls on the Copier. Nil
// or zero-valued attributes are ignored.
ObjectAttrs
// RewriteToken can be set before calling Run to resume a copy
// operation. After Run returns a non-nil error, RewriteToken will
// have been updated to contain the value needed to resume the copy.
RewriteToken string
// ProgressFunc can be used to monitor the progress of a multi-RPC copy
// operation. If ProgressFunc is not nil and copying requires multiple
// calls to the underlying service (see
// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
// ProgressFunc will be invoked after each call with the number of bytes of
// content copied so far and the total size in bytes of the source object.
//
// ProgressFunc is intended to make upload progress available to the
// application. For example, the implementation of ProgressFunc may update
// a progress bar in the application's UI, or log the result of
// float64(copiedBytes)/float64(totalBytes).
//
// ProgressFunc should return quickly without blocking.
ProgressFunc func(copiedBytes, totalBytes uint64)
// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
// any.
//
// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
// (via ObjectHandle.Key) on the destination object will result in an error when
// Run is called.
DestinationKMSKeyName string
dst, src *ObjectHandle
}
// Run performs the copy.
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
defer func() { trace.EndSpan(ctx, err) }()
if err := c.src.validate(); err != nil {
return nil, err
}
if err := c.dst.validate(); err != nil {
return nil, err
}
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
}
// Convert destination attributes to raw form, omitting the bucket.
// If the bucket is included but name or content-type aren't, the service
// returns a 400 with "Required" as the only message. Omitting the bucket
// does not cause any problems.
rawObject := c.ObjectAttrs.toRawObject("")
for {
res, err := c.callRewrite(ctx, rawObject)
if err != nil {
return nil, err
}
if c.ProgressFunc != nil {
c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
}
if res.Done { // Finished successfully.
return newObject(res.Resource), nil
}
}
}
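// exampleCopy is a minimal usage sketch of CopierFrom and Run above. It
// assumes dst and src are existing *ObjectHandles; RewriteToken and
// ProgressFunc are left at their zero values, which is fine for a simple copy.
func exampleCopy(ctx context.Context, dst, src *ObjectHandle) (*ObjectAttrs, error) {
	return dst.CopierFrom(src).Run(ctx)
}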
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
call.Context(ctx).Projection("full")
if c.RewriteToken != "" {
call.RewriteToken(c.RewriteToken)
}
if c.DestinationKMSKeyName != "" {
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
if c.dst.userProject != "" {
call.UserProject(c.dst.userProject)
} else if c.src.userProject != "" {
call.UserProject(c.src.userProject)
}
if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
return nil, err
}
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
return nil, err
}
if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
return nil, err
}
var res *raw.RewriteResponse
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
if err != nil {
return nil, err
}
c.RewriteToken = res.RewriteToken
return res, nil
}
// ComposerFrom creates a Composer that can compose srcs into dst.
// You can immediately call Run on the returned Composer, or you can
// configure it first.
//
// The encryption key for the destination object will be used to decrypt all
// source objects and encrypt the destination object. It is an error
// to specify an encryption key for any of the source objects.
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
return &Composer{dst: dst, srcs: srcs}
}
// A Composer composes source objects into a destination object.
//
// For Requester Pays buckets, the user project of dst is billed.
type Composer struct {
// ObjectAttrs are optional attributes to set on the destination object.
// Any attributes must be initialized before any calls on the Composer. Nil
// or zero-valued attributes are ignored.
ObjectAttrs
dst *ObjectHandle
srcs []*ObjectHandle
}
// Run performs the compose operation.
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
defer func() { trace.EndSpan(ctx, err) }()
if err := c.dst.validate(); err != nil {
return nil, err
}
if len(c.srcs) == 0 {
return nil, errors.New("storage: at least one source object must be specified")
}
req := &raw.ComposeRequest{}
// Compose requires a non-empty Destination, so we always set it,
// even if the caller-provided ObjectAttrs is the zero value.
req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
for _, src := range c.srcs {
if err := src.validate(); err != nil {
return nil, err
}
if src.bucket != c.dst.bucket {
return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
}
if src.encryptionKey != nil {
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
}
srcObj := &raw.ComposeRequestSourceObjects{
Name: src.object,
}
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
return nil, err
}
req.SourceObjects = append(req.SourceObjects, srcObj)
}
call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
if c.dst.userProject != "" {
call.UserProject(c.dst.userProject)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if err != nil {
return nil, err
}
return newObject(obj), nil
}
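// exampleCompose is a minimal usage sketch of ComposerFrom and Run above. It
// assumes dst and all srcs are handles in the same bucket, as Run requires.
func exampleCompose(ctx context.Context, dst *ObjectHandle, srcs ...*ObjectHandle) (*ObjectAttrs, error) {
	return dst.ComposerFrom(srcs...).Run(ctx)
}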

@ -1,176 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package storage provides an easy way to work with Google Cloud Storage.
Google Cloud Storage stores data in named objects, which are grouped into buckets.
More information about Google Cloud Storage is available at
https://cloud.google.com/storage/docs.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
All of the methods of this package use exponential backoff to retry calls that fail
with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
indefinitely unless the controlling context is canceled or the client is closed. See
context.WithTimeout and context.WithCancel.
Creating a Client
To start working with this package, create a client:
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
The client will use your default application credentials.
If you only wish to access public data, you can create
an unauthenticated client with
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
Buckets
A Google Cloud Storage bucket is a collection of objects. To work with a
bucket, make a bucket handle:
bkt := client.Bucket(bucketName)
A handle is a reference to a bucket. You can have a handle even if the
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
call Create on the handle:
if err := bkt.Create(ctx, projectID, nil); err != nil {
// TODO: Handle error.
}
Note that although buckets are associated with projects, bucket names are
global across all projects.
Each bucket has associated metadata, represented in this package by
BucketAttrs. The third argument to BucketHandle.Create allows you to set
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
Attrs:
attrs, err := bkt.Attrs(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
Objects
An object holds arbitrary data as a sequence of bytes, like a file. You
refer to objects using a handle, just as with buckets, but unlike buckets
you don't explicitly create an object. Instead, the first time you write
to an object it will be created. You can use the standard Go io.Reader
and io.Writer interfaces to read and write object data:
obj := bkt.Object("data")
// Write something to obj.
// w implements io.Writer.
w := obj.NewWriter(ctx)
// Write some text to obj. This will either create the object or overwrite whatever is there already.
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
// TODO: Handle error.
}
// Close, just like writing a file.
if err := w.Close(); err != nil {
// TODO: Handle error.
}
// Read it back.
r, err := obj.NewReader(ctx)
if err != nil {
// TODO: Handle error.
}
defer r.Close()
if _, err := io.Copy(os.Stdout, r); err != nil {
// TODO: Handle error.
}
// Prints "This object contains text."
Objects also have attributes, which you can fetch with Attrs:
objAttrs, err := obj.Attrs(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("object %s has size %d and can be read using %s\n",
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
ACLs
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
ACLRules, each of which specifies the role of a user, group or project. ACLs
are suitable for fine-grained control, but you may prefer using IAM to control
access at the project level (see
https://cloud.google.com/storage/docs/access-control/iam).
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
acls, err := obj.ACL().List(ctx)
if err != nil {
// TODO: Handle error.
}
for _, rule := range acls {
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
}
You can also set and delete ACLs.
Conditions
Every object has a generation and a metageneration. The generation changes
whenever the content changes, and the metageneration changes whenever the
metadata changes. Conditions let you check these values before an operation;
the operation only executes if the conditions match. You can use conditions to
prevent race conditions in read-modify-write operations.
For example, say you've read an object's metadata into objAttrs. Now
you want to write to that object, but only if its contents haven't changed
since you read it. Here is how to express that:
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
// Proceed with writing as above.
Signed URLs
You can obtain a URL that lets anyone read or write an object for a limited time.
You don't need to create a client to do this. See the documentation of
SignedURL for details.
url, err := storage.SignedURL(bucketName, "shared-object", opts)
if err != nil {
// TODO: Handle error.
}
fmt.Println(url)
Errors
Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example:
if e, ok := err.(*googleapi.Error); ok {
if e.Code == 409 { ... }
}
*/
package storage // import "cloud.google.com/go/storage"

@ -1,32 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.10
package storage
import "google.golang.org/api/googleapi"
func shouldRetry(err error) bool {
switch e := err.(type) {
case *googleapi.Error:
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}
}

@ -1,330 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"time"
"google.golang.org/api/iterator"
raw "google.golang.org/api/storage/v1"
)
// HMACState is the state of the HMAC key.
type HMACState string
const (
// Active is the status for an active key that can be used to sign
// requests.
Active HMACState = "ACTIVE"
// Inactive is the status for an inactive key thus requests signed by
// this key will be denied.
Inactive HMACState = "INACTIVE"
// Deleted is the status for a key that is deleted.
// Once in this state the key cannot be recovered
// and does not count towards key limits. Deleted keys will be cleaned
// up later.
Deleted HMACState = "DELETED"
)
// HMACKey is the representation of a Google Cloud Storage HMAC key.
//
// HMAC keys are used to authenticate signed access to objects. To enable HMAC key
// authentication, please visit https://cloud.google.com/storage/docs/migrating.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKey struct {
// The HMAC's secret key.
Secret string
// AccessID is the ID of the HMAC key.
AccessID string
// Etag is the HTTP/1.1 Entity tag.
Etag string
// ID is the ID of the HMAC key, including the ProjectID and AccessID.
ID string
// ProjectID is the ID of the project that owns the
// service account to which the key authenticates.
ProjectID string
// ServiceAccountEmail is the email address
// of the key's associated service account.
ServiceAccountEmail string
// CreatedTime is the creation time of the HMAC key.
CreatedTime time.Time
// UpdatedTime is the last modification time of the HMAC key metadata.
UpdatedTime time.Time
// State is the state of the HMAC key.
// It can be one of Active, Inactive or Deleted.
State HMACState
}
// HMACKeyHandle helps provide access and management for HMAC keys.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKeyHandle struct {
projectID string
accessID string
raw *raw.ProjectsHmacKeysService
}
// HMACKeyHandle creates a handle that will be used for HMACKey operations.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
return &HMACKeyHandle{
projectID: projectID,
accessID: accessID,
raw: raw.NewProjectsHmacKeysService(c.raw),
}
}
// Get invokes an RPC to retrieve the HMAC key referenced by the
// HMACKeyHandle's accessID.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (hkh *HMACKeyHandle) Get(ctx context.Context) (*HMACKey, error) {
call := hkh.raw.Get(hkh.projectID, hkh.accessID)
setClientHeader(call.Header())
var metadata *raw.HmacKeyMetadata
var err error
err = runWithRetry(ctx, func() error {
metadata, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
hkPb := &raw.HmacKey{
Metadata: metadata,
}
return pbHmacKeyToHMACKey(hkPb, false)
}
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
// Only inactive HMAC keys can be deleted.
// After deletion, a key cannot be used to authenticate requests.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (hkh *HMACKeyHandle) Delete(ctx context.Context) error {
delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID)
setClientHeader(delCall.Header())
return runWithRetry(ctx, func() error {
return delCall.Context(ctx).Do()
})
}
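// exampleDeactivateAndDeleteHMACKey is a minimal usage sketch of the handle
// methods above and Update below. It assumes c is an existing *Client; a key
// must be Inactive before Delete will succeed.
func exampleDeactivateAndDeleteHMACKey(ctx context.Context, c *Client, projectID, accessID string) error {
	hkh := c.HMACKeyHandle(projectID, accessID)
	key, err := hkh.Get(ctx)
	if err != nil {
		return err
	}
	if key.State == Active {
		if _, err := hkh.Update(ctx, HMACKeyAttrsToUpdate{State: Inactive}); err != nil {
			return err
		}
	}
	return hkh.Delete(ctx)
}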
func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
pbmd := pb.Metadata
if pbmd == nil {
return nil, errors.New("field Metadata cannot be nil")
}
createdTime, err := time.Parse(time.RFC3339, pbmd.TimeCreated)
if err != nil {
return nil, fmt.Errorf("field CreatedTime: %v", err)
}
updatedTime, err := time.Parse(time.RFC3339, pbmd.Updated)
if err != nil && !updatedTimeCanBeNil {
return nil, fmt.Errorf("field UpdatedTime: %v", err)
}
hmk := &HMACKey{
AccessID: pbmd.AccessId,
Secret: pb.Secret,
Etag: pbmd.Etag,
ID: pbmd.Id,
State: HMACState(pbmd.State),
ProjectID: pbmd.ProjectId,
CreatedTime: createdTime,
UpdatedTime: updatedTime,
ServiceAccountEmail: pbmd.ServiceAccountEmail,
}
return hmk, nil
}
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string) (*HMACKey, error) {
if projectID == "" {
return nil, errors.New("storage: expecting a non-blank projectID")
}
if serviceAccountEmail == "" {
return nil, errors.New("storage: expecting a non-blank service account email")
}
svc := raw.NewProjectsHmacKeysService(c.raw)
call := svc.Create(projectID, serviceAccountEmail)
setClientHeader(call.Header())
var hkPb *raw.HmacKey
var err error
err = runWithRetry(ctx, func() error {
hkPb, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
return pbHmacKeyToHMACKey(hkPb, true)
}
// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKeyAttrsToUpdate struct {
// State is required and must be either Active or Inactive.
State HMACState
// Etag is an optional field and it is the HTTP/1.1 Entity tag.
Etag string
}
// Update mutates the HMACKey referred to by accessID.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate) (*HMACKey, error) {
if au.State != Active && au.State != Inactive {
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
}
call := h.raw.Update(h.projectID, h.accessID, &raw.HmacKeyMetadata{
Etag: au.Etag,
State: string(au.State),
})
setClientHeader(call.Header())
var metadata *raw.HmacKeyMetadata
var err error
err = runWithRetry(ctx, func() error {
metadata, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
hkPb := &raw.HmacKey{
Metadata: metadata,
}
return pbHmacKeyToHMACKey(hkPb, false)
}
// An HMACKeysIterator is an iterator over HMACKeys.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKeysIterator struct {
ctx context.Context
raw *raw.ProjectsHmacKeysService
projectID string
hmacKeys []*HMACKey
pageInfo *iterator.PageInfo
nextFunc func() error
index int
}
// ListHMACKeys returns an iterator for listing HMACKeys.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (c *Client) ListHMACKeys(ctx context.Context, projectID string) *HMACKeysIterator {
it := &HMACKeysIterator{
ctx: ctx,
raw: raw.NewProjectsHmacKeysService(c.raw),
projectID: projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.hmacKeys) - it.index },
func() interface{} {
prev := it.hmacKeys
it.hmacKeys = it.hmacKeys[:0]
it.index = 0
return prev
})
return it
}
// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (it *HMACKeysIterator) Next() (*HMACKey, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
key := it.hmacKeys[it.index]
it.index++
return key, nil
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
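// exampleListHMACKeys is a minimal usage sketch of the iterator above: Next is
// called until it returns iterator.Done. It assumes c is an existing *Client.
func exampleListHMACKeys(ctx context.Context, c *Client, projectID string) ([]*HMACKey, error) {
	var keys []*HMACKey
	it := c.ListHMACKeys(ctx, projectID)
	for {
		key, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		keys = append(keys, key)
	}
	return keys, nil
}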
func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) {
call := it.raw.List(it.projectID)
setClientHeader(call.Header())
call = call.PageToken(pageToken)
// By default we'll also show deleted keys and then
// let users filter on their own.
call = call.ShowDeletedKeys(true)
if pageSize > 0 {
call = call.MaxResults(int64(pageSize))
}
ctx := it.ctx
var resp *raw.HmacKeysMetadata
err = runWithRetry(it.ctx, func() error {
resp, err = call.Context(ctx).Do()
return err
})
if err != nil {
return "", err
}
for _, metadata := range resp.Items {
hkPb := &raw.HmacKey{
Metadata: metadata,
}
hkey, err := pbHmacKeyToHMACKey(hkPb, true)
if err != nil {
return "", err
}
it.hmacKeys = append(it.hmacKeys, hkey)
}
return resp.NextPageToken, nil
}

@ -1,130 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"cloud.google.com/go/iam"
"cloud.google.com/go/internal/trace"
raw "google.golang.org/api/storage/v1"
iampb "google.golang.org/genproto/googleapis/iam/v1"
)
// IAM provides access to IAM access control for the bucket.
func (b *BucketHandle) IAM() *iam.Handle {
return iam.InternalNewHandleClient(&iamClient{
raw: b.c.raw,
userProject: b.userProject,
}, b.name)
}
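// exampleBucketIAM is a minimal usage sketch of the IAM handle returned above,
// using the read-modify-write pattern from the iam package. It assumes b is an
// existing *BucketHandle; the role name is only illustrative.
func exampleBucketIAM(ctx context.Context, b *BucketHandle) error {
	handle := b.IAM()
	policy, err := handle.Policy(ctx)
	if err != nil {
		return err
	}
	policy.Add("allUsers", "roles/storage.objectViewer")
	return handle.SetPolicy(ctx, policy)
}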
// iamClient implements the iam.client interface.
type iamClient struct {
raw *raw.Service
userProject string
}
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.GetIamPolicy(resource)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var rp *raw.Policy
err = runWithRetry(ctx, func() error {
rp, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
return iamFromStoragePolicy(rp), nil
}
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
defer func() { trace.EndSpan(ctx, err) }()
rp := iamToStoragePolicy(p)
call := c.raw.Buckets.SetIamPolicy(resource, rp)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
return runWithRetry(ctx, func() error {
_, err := call.Context(ctx).Do()
return err
})
}
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.TestIamPermissions(resource, perms)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var res *raw.TestIamPermissionsResponse
err = runWithRetry(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
return res.Permissions, nil
}
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
return &raw.Policy{
Bindings: iamToStorageBindings(ip.Bindings),
Etag: string(ip.Etag),
}
}
func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
var rbs []*raw.PolicyBindings
for _, ib := range ibs {
rbs = append(rbs, &raw.PolicyBindings{
Role: ib.Role,
Members: ib.Members,
})
}
return rbs
}
func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
return &iampb.Policy{
Bindings: iamFromStorageBindings(rp.Bindings),
Etag: []byte(rp.Etag),
}
}
func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
var ibs []*iampb.Binding
for _, rb := range rbs {
ibs = append(ibs, &iampb.Binding{
Role: rb.Role,
Members: rb.Members,
})
}
return ibs
}

@ -1,37 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"cloud.google.com/go/internal"
gax "github.com/googleapis/gax-go/v2"
)
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
func runWithRetry(ctx context.Context, call func() error) error {
return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
err = call()
if err == nil {
return true, nil
}
if shouldRetry(err) {
return false, nil
}
return true, err
})
}
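// exampleRetriedFetch is a minimal usage sketch of runWithRetry, mirroring the
// pattern used throughout this package: the result is captured in a closure
// variable so the call can be re-issued on retryable errors.
func exampleRetriedFetch(ctx context.Context, fetch func() (string, error)) (string, error) {
	var s string
	err := runWithRetry(ctx, func() error {
		var err error
		s, err = fetch()
		return err
	})
	return s, err
}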

@ -1,42 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.10
package storage
import (
"net/url"
"strings"
"google.golang.org/api/googleapi"
)
func shouldRetry(err error) bool {
switch e := err.(type) {
case *googleapi.Error:
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case *url.Error:
// Retry on REFUSED_STREAM.
// Unfortunately the error type is unexported, so we resort to string
// matching.
return strings.Contains(e.Error(), "REFUSED_STREAM")
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}
}

@ -1,188 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"regexp"
"cloud.google.com/go/internal/trace"
raw "google.golang.org/api/storage/v1"
)
// A Notification describes how to send Cloud PubSub messages when certain
// events occur in a bucket.
type Notification struct {
// The ID of the notification.
ID string
// The ID of the topic to which this subscription publishes.
TopicID string
// The ID of the project to which the topic belongs.
TopicProjectID string
// Only send notifications about listed event types. If empty, send notifications
// for all event types.
// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
EventTypes []string
// If present, only apply this notification configuration to object names that
// begin with this prefix.
ObjectNamePrefix string
// An optional list of additional attributes to attach to each Cloud PubSub
// message published for this notification subscription.
CustomAttributes map[string]string
// The contents of the message payload.
// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
PayloadFormat string
}
// Values for Notification.PayloadFormat.
const (
// Send no payload with notification messages.
NoPayload = "NONE"
// Send object metadata as JSON with notification messages.
JSONPayload = "JSON_API_V1"
)
// Values for Notification.EventTypes.
const (
// Event that occurs when an object is successfully created.
ObjectFinalizeEvent = "OBJECT_FINALIZE"
// Event that occurs when the metadata of an existing object changes.
ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
// Event that occurs when an object is permanently deleted.
ObjectDeleteEvent = "OBJECT_DELETE"
// Event that occurs when the live version of an object becomes an
// archived version.
ObjectArchiveEvent = "OBJECT_ARCHIVE"
)
func toNotification(rn *raw.Notification) *Notification {
n := &Notification{
ID: rn.Id,
EventTypes: rn.EventTypes,
ObjectNamePrefix: rn.ObjectNamePrefix,
CustomAttributes: rn.CustomAttributes,
PayloadFormat: rn.PayloadFormat,
}
n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
return n
}
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
// parseNotificationTopic extracts the project and topic IDs from the full
// resource name returned by the service. If the name is malformed, it returns
// "?" for both IDs.
func parseNotificationTopic(nt string) (projectID, topicID string) {
matches := topicRE.FindStringSubmatch(nt)
if matches == nil {
return "?", "?"
}
return matches[1], matches[2]
}
func toRawNotification(n *Notification) *raw.Notification {
return &raw.Notification{
Id: n.ID,
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
n.TopicProjectID, n.TopicID),
EventTypes: n.EventTypes,
ObjectNamePrefix: n.ObjectNamePrefix,
CustomAttributes: n.CustomAttributes,
PayloadFormat: string(n.PayloadFormat),
}
}
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
defer func() { trace.EndSpan(ctx, err) }()
if n.ID != "" {
return nil, errors.New("storage: AddNotification: ID must not be set")
}
if n.TopicProjectID == "" {
return nil, errors.New("storage: AddNotification: missing TopicProjectID")
}
if n.TopicID == "" {
return nil, errors.New("storage: AddNotification: missing TopicID")
}
call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
rn, err := call.Context(ctx).Do()
if err != nil {
return nil, err
}
return toNotification(rn), nil
}
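// exampleNotification is a minimal usage sketch of AddNotification above and
// DeleteNotification below. It assumes b is an existing *BucketHandle and that
// projectID and topicID name an existing Cloud Pub/Sub topic.
func exampleNotification(ctx context.Context, b *BucketHandle, projectID, topicID string) error {
	n, err := b.AddNotification(ctx, &Notification{
		TopicProjectID: projectID,
		TopicID:        topicID,
		PayloadFormat:  JSONPayload,
		EventTypes:     []string{ObjectFinalizeEvent},
	})
	if err != nil {
		return err
	}
	// The returned ID identifies the configuration, here used to remove it again.
	return b.DeleteNotification(ctx, n.ID)
}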
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.List(b.name)
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
var res *raw.Notifications
err = runWithRetry(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
return notificationsToMap(res.Items), nil
}
func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
m := map[string]*Notification{}
for _, rn := range rns {
m[rn.Id] = toNotification(rn)
}
return m
}
// DeleteNotification deletes the notification with the given ID.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.Delete(b.name, id)
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
return call.Context(ctx).Do()
}

@ -1,403 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/googleapi"
)
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
// ReaderObjectAttrs are attributes about the object being read. These are populated
// during the New call. This struct only holds a subset of object attributes: to
// get the full set of attributes, use ObjectHandle.Attrs.
//
// Each field is read-only.
type ReaderObjectAttrs struct {
// Size is the length of the object's content.
Size int64
// StartOffset is the byte offset within the object
// from which reading begins.
// This value is only non-zero for range requests.
StartOffset int64
// ContentType is the MIME type of the object's content.
ContentType string
// ContentEncoding is the encoding of the object's content.
ContentEncoding string
// CacheControl specifies whether and for how long browser and Internet
// caches are allowed to cache your objects.
CacheControl string
// LastModified is the time that the object was last modified.
LastModified time.Time
// Generation is the generation number of the object's content.
Generation int64
// Metageneration is the version of the metadata for this object at
// this generation. This field is used for preconditions and for
// detecting changes in metadata. A metageneration number is only
// meaningful in the context of a particular generation of a
// particular object.
Metageneration int64
}
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
}
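// exampleReadAll is a minimal usage sketch of NewReader above. It assumes o is
// an existing *ObjectHandle and reads the whole object into memory; for large
// objects, stream from the Reader instead.
func exampleReadAll(ctx context.Context, o *ObjectHandle) ([]byte, error) {
	r, err := o.NewReader(ctx)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}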
// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end. If offset is negative, the object is read abs(offset) bytes
// from the end, and length must also be negative to indicate all remaining
// bytes will be read.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
if offset < 0 && length >= 0 {
return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", offset)
}
if o.conds != nil {
if err := o.conds.validate("NewRangeReader"); err != nil {
return nil, err
}
}
u := &url.URL{
Scheme: o.c.scheme,
Host: o.c.readHost,
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
}
verb := "GET"
if length == 0 {
verb = "HEAD"
}
req, err := http.NewRequest(verb, u.String(), nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
if o.userProject != "" {
req.Header.Set("X-Goog-User-Project", o.userProject)
}
if o.readCompressed {
req.Header.Set("Accept-Encoding", "gzip")
}
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
return nil, err
}
gen := o.gen
// Define a function that initiates a Read with offset and length, assuming we
// have already read seen bytes.
reopen := func(seen int64) (*http.Response, error) {
start := offset + seen
if length < 0 && start < 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d", start))
} else if length < 0 && start > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
} else if length > 0 {
// The end character isn't affected by how many bytes we've seen.
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
}
// We wait to assign conditions here because the generation number can change in between reopen() runs.
req.URL.RawQuery = conditionsQuery(gen, o.conds)
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
}
}
if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
// If a generation hasn't been specified, and this is the first response we get, let's record the
// generation. In future requests we'll use this generation as a precondition to avoid data races.
if gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
if err != nil {
return err
}
gen = gen64
}
return nil
})
if err != nil {
return nil, err
}
return res, nil
}
res, err := reopen(0)
if err != nil {
return nil, err
}
var (
size int64 // total size of object, even if a range was requested.
checkCRC bool
crc uint32
startOffset int64 // non-zero if range request.
)
if res.StatusCode == http.StatusPartialContent {
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
dashIndex := strings.Index(cr, "-")
if dashIndex >= 0 {
startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err)
}
}
} else {
size = res.ContentLength
// Check the CRC iff all of the following hold:
// - We asked for content (length != 0).
// - We got all the content (status != PartialContent).
// - The server sent a CRC header.
// - The Go http stack did not uncompress the file.
// - We were not served compressed data that was uncompressed on download.
// The problem with the last two cases is that the CRC will not match -- GCS
// computes it on the compressed contents, but we compute it on the
// uncompressed contents.
if length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
crc, checkCRC = parseCRC32c(res)
}
}
remain := res.ContentLength
body := res.Body
if length == 0 {
remain = 0
body.Close()
body = emptyBody
}
var metaGen int64
if res.Header.Get("X-Goog-Generation") != "" {
metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
if err != nil {
return nil, err
}
}
var lm time.Time
if res.Header.Get("Last-Modified") != "" {
lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
return nil, err
}
}
attrs := ReaderObjectAttrs{
Size: size,
ContentType: res.Header.Get("Content-Type"),
ContentEncoding: res.Header.Get("Content-Encoding"),
CacheControl: res.Header.Get("Cache-Control"),
LastModified: lm,
StartOffset: startOffset,
Generation: gen,
Metageneration: metaGen,
}
return &Reader{
Attrs: attrs,
body: body,
size: size,
remain: remain,
wantCRC: crc,
checkCRC: checkCRC,
reopen: reopen,
}, nil
}
func uncompressedByServer(res *http.Response) bool {
// If the data is stored as gzip but is not encoded as gzip, then it
// was uncompressed by the server.
return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" &&
res.Header.Get("Content-Encoding") != "gzip"
}
func parseCRC32c(res *http.Response) (uint32, bool) {
const prefix = "crc32c="
for _, spec := range res.Header["X-Goog-Hash"] {
if strings.HasPrefix(spec, prefix) {
c, err := decodeUint32(spec[len(prefix):])
if err == nil {
return c, true
}
}
}
return 0, false
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// Reader reads a Cloud Storage object.
// It implements io.Reader.
//
// Typically, a Reader computes the CRC of the downloaded content and compares it to
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
Attrs ReaderObjectAttrs
body io.ReadCloser
seen, remain, size int64
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
reopen func(seen int64) (*http.Response, error)
}
// Close closes the Reader. It must be called when done reading.
func (r *Reader) Close() error {
return r.body.Close()
}
func (r *Reader) Read(p []byte) (int, error) {
n, err := r.readWithRetry(p)
if r.remain != -1 {
r.remain -= int64(n)
}
if r.checkCRC {
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
// Check CRC here. It would be natural to check it in Close, but
// everybody defers Close on the assumption that it doesn't return
// anything worth looking at.
if err == io.EOF {
if r.gotCRC != r.wantCRC {
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
r.gotCRC, r.wantCRC)
}
}
}
return n, err
}
func (r *Reader) readWithRetry(p []byte) (int, error) {
n := 0
for len(p[n:]) > 0 {
m, err := r.body.Read(p[n:])
n += m
r.seen += int64(m)
if !shouldRetryRead(err) {
return n, err
}
// Read failed, but we will try again. Send a ranged read request that takes
// into account the number of bytes we've already seen.
res, err := r.reopen(r.seen)
if err != nil {
// reopen already retries
return n, err
}
r.body.Close()
r.body = res.Body
}
return n, nil
}
func shouldRetryRead(err error) bool {
if err == nil {
return false
}
return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2")
}
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
//
// Deprecated: use Reader.Attrs.Size.
func (r *Reader) Size() int64 {
return r.Attrs.Size
}
// Remain returns the number of bytes left to read, or -1 if unknown.
func (r *Reader) Remain() int64 {
return r.remain
}
// ContentType returns the content type of the object.
//
// Deprecated: use Reader.Attrs.ContentType.
func (r *Reader) ContentType() string {
return r.Attrs.ContentType
}
// ContentEncoding returns the content encoding of the object.
//
// Deprecated: use Reader.Attrs.ContentEncoding.
func (r *Reader) ContentEncoding() string {
return r.Attrs.ContentEncoding
}
// CacheControl returns the cache control of the object.
//
// Deprecated: use Reader.Attrs.CacheControl.
func (r *Reader) CacheControl() string {
return r.Attrs.CacheControl
}
// LastModified returns the value of the Last-Modified header.
//
// Deprecated: use Reader.Attrs.LastModified.
func (r *Reader) LastModified() (time.Time, error) {
return r.Attrs.LastModified, nil
}

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

@ -1,260 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"sync"
"unicode/utf8"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
)
// A Writer writes a Cloud Storage object.
type Writer struct {
// ObjectAttrs are optional attributes to set on the object. Any attributes
// must be initialized before the first Write call. Nil or zero-valued
// attributes are ignored.
ObjectAttrs
// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
// to true in addition to setting the Writer's CRC32C field, because zero
// is a valid CRC and normally a zero would not be transmitted.
// If a CRC32C is sent, and the data written does not match the checksum,
// the write will be rejected.
SendCRC32C bool
// ChunkSize controls the maximum number of bytes of the object that the
// Writer will attempt to send to the server in a single request. Objects
// smaller than the size will be sent in a single request, while larger
// objects will be split over multiple requests. The size will be rounded up
// to the nearest multiple of 256K. If zero, chunking will be disabled and
// the object will be uploaded in a single request.
//
// ChunkSize will default to a reasonable value. If you perform many concurrent
// writes of small objects, you may wish to set ChunkSize to a value that matches
// your objects' sizes to avoid consuming large amounts of memory.
//
// ChunkSize must be set before the first Write call.
ChunkSize int
// ProgressFunc can be used to monitor the progress of a large write
// operation. If ProgressFunc is not nil and writing requires multiple
// calls to the underlying service (see
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
// then ProgressFunc will be invoked after each call with the number of bytes of
// content copied so far.
//
// ProgressFunc should return quickly without blocking.
ProgressFunc func(int64)
ctx context.Context
o *ObjectHandle
opened bool
pw *io.PipeWriter
donec chan struct{} // closed after err and obj are set.
obj *ObjectAttrs
mu sync.Mutex
err error
}
func (w *Writer) open() error {
attrs := w.ObjectAttrs
// Check the developer didn't change the object Name (this is unfortunate, but
// we don't want to store an object under the wrong name).
if attrs.Name != w.o.object {
return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
}
if !utf8.ValidString(attrs.Name) {
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
}
if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
}
pr, pw := io.Pipe()
w.pw = pw
w.opened = true
go w.monitorCancel()
if w.ChunkSize < 0 {
return errors.New("storage: Writer.ChunkSize must be non-negative")
}
mediaOpts := []googleapi.MediaOption{
googleapi.ChunkSize(w.ChunkSize),
}
if c := attrs.ContentType; c != "" {
mediaOpts = append(mediaOpts, googleapi.ContentType(c))
}
go func() {
defer close(w.donec)
rawObj := attrs.toRawObject(w.o.bucket)
if w.SendCRC32C {
rawObj.Crc32c = encodeUint32(attrs.CRC32C)
}
if w.MD5 != nil {
rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
}
if w.o.c.envHost != "" {
w.o.c.raw.BasePath = fmt.Sprintf("%s://%s", w.o.c.scheme, w.o.c.envHost)
}
call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
Media(pr, mediaOpts...).
Projection("full").
Context(w.ctx)
if w.ProgressFunc != nil {
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
}
if attrs.KMSKeyName != "" {
call.KmsKeyName(attrs.KMSKeyName)
}
if attrs.PredefinedACL != "" {
call.PredefinedAcl(attrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.mu.Lock()
w.err = err
w.mu.Unlock()
pr.CloseWithError(err)
return
}
var resp *raw.Object
err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
if err == nil {
if w.o.userProject != "" {
call.UserProject(w.o.userProject)
}
setClientHeader(call.Header())
// The internals that perform call.Do automatically retry
// uploading chunks, hence no need to add retries here.
// See issue https://github.com/googleapis/google-cloud-go/issues/1507.
//
// However, since this whole call's internals involve making the initial
// resumable upload session, the first HTTP request is not retried.
// TODO: Follow-up with google.golang.org/gensupport to solve
// https://github.com/googleapis/google-api-go-client/issues/392.
resp, err = call.Do()
}
if err != nil {
w.mu.Lock()
w.err = err
w.mu.Unlock()
pr.CloseWithError(err)
return
}
w.obj = newObject(resp)
}()
return nil
}
// Write appends to w. It implements the io.Writer interface.
//
// Since writes happen asynchronously, Write may return a nil
// error even though the write failed (or will fail). Always
// use the error returned from Writer.Close to determine if
// the upload was successful.
func (w *Writer) Write(p []byte) (n int, err error) {
w.mu.Lock()
werr := w.err
w.mu.Unlock()
if werr != nil {
return 0, werr
}
if !w.opened {
if err := w.open(); err != nil {
return 0, err
}
}
n, err = w.pw.Write(p)
if err != nil {
w.mu.Lock()
werr := w.err
w.mu.Unlock()
// Preserve existing functionality that when context is canceled, Write will return
// context.Canceled instead of "io: read/write on closed pipe". This hides the
// pipe implementation detail from users and makes Write seem as though it's an RPC.
if werr == context.Canceled || werr == context.DeadlineExceeded {
return n, werr
}
}
return n, err
}
// Close completes the write operation and flushes any buffered data.
// If Close doesn't return an error, metadata about the written object
// can be retrieved by calling Attrs.
func (w *Writer) Close() error {
if !w.opened {
if err := w.open(); err != nil {
return err
}
}
// Closing either the read or write causes the entire pipe to close.
if err := w.pw.Close(); err != nil {
return err
}
<-w.donec
w.mu.Lock()
defer w.mu.Unlock()
return w.err
}
// monitorCancel is intended to be used as a background goroutine. It monitors the
// context, and when it observes that the context has been canceled, it manually
// closes things that do not take a context.
func (w *Writer) monitorCancel() {
select {
case <-w.ctx.Done():
w.mu.Lock()
werr := w.ctx.Err()
w.err = werr
w.mu.Unlock()
// Closing either the read or write causes the entire pipe to close.
w.CloseWithError(werr)
case <-w.donec:
}
}
// CloseWithError aborts the write operation with the provided error.
// CloseWithError always returns nil.
//
// Deprecated: cancel the context passed to NewWriter instead.
func (w *Writer) CloseWithError(err error) error {
if !w.opened {
return nil
}
return w.pw.CloseWithError(err)
}
// Attrs returns metadata about a successfully-written object.
// It's only valid to call it after Close returns nil.
func (w *Writer) Attrs() *ObjectAttrs {
return w.obj
}
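// Illustrative usage sketch: the typical Write/Close/Attrs flow through the
// public cloud.google.com/go/storage client. The bucket and object names are
// placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	w := client.Bucket("my-bucket").Object("greeting.txt").NewWriter(ctx)
	w.ContentType = "text/plain"
	w.ChunkSize = 256 * 1024 // must be set before the first Write; 0 disables chunking

	if _, err := w.Write([]byte("hello, world\n")); err != nil {
		log.Fatal(err) // asynchronous failures may only surface at Close
	}
	// The object is not committed until Close returns nil.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("uploaded, size:", w.Attrs().Size)
}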

@ -1,218 +0,0 @@
// Package cidr is a collection of assorted utilities for computing
// network and host addresses within network ranges.
//
// It expects a CIDR-type address structure where addresses are divided into
// some number of prefix bits representing the network and then the remaining
// suffix bits represent the host.
//
// For example, it can help to calculate addresses for sub-networks of a
// parent network, or to calculate host addresses within a particular prefix.
//
// At present this package is prioritizing simplicity of implementation and
// de-prioritizing speed and memory usage. Thus caution is advised before
// using this package in performance-critical applications or hot codepaths.
// Patches to improve the speed and memory usage may be accepted as long as
// they do not result in a significant increase in code complexity.
package cidr
import (
"fmt"
"math/big"
"net"
)
// Subnet takes a parent CIDR range and creates a subnet within it
// with the given number of additional prefix bits and the given
// network number.
//
// For example, 10.3.0.0/16, extended by 8 bits, with a network number
// of 5, becomes 10.3.5.0/24 .
func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
ip := base.IP
mask := base.Mask
parentLen, addrLen := mask.Size()
newPrefixLen := parentLen + newBits
if newPrefixLen > addrLen {
return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits)
}
maxNetNum := uint64(1<<uint64(newBits)) - 1
if uint64(num) > maxNetNum {
return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num)
}
return &net.IPNet{
IP: insertNumIntoIP(ip, big.NewInt(int64(num)), newPrefixLen),
Mask: net.CIDRMask(newPrefixLen, addrLen),
}, nil
}
// Host takes a parent CIDR range and turns it into a host IP address with
// the given host number.
//
// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
func Host(base *net.IPNet, num int) (net.IP, error) {
ip := base.IP
mask := base.Mask
bigNum := big.NewInt(int64(num))
parentLen, addrLen := mask.Size()
hostLen := addrLen - parentLen
maxHostNum := big.NewInt(int64(1))
maxHostNum.Lsh(maxHostNum, uint(hostLen))
maxHostNum.Sub(maxHostNum, big.NewInt(1))
numUint64 := big.NewInt(int64(bigNum.Uint64()))
if bigNum.Cmp(big.NewInt(0)) == -1 {
numUint64.Neg(bigNum)
numUint64.Sub(numUint64, big.NewInt(int64(1)))
bigNum.Sub(maxHostNum, numUint64)
}
if numUint64.Cmp(maxHostNum) == 1 {
return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
}
var bitlength int
if ip.To4() != nil {
bitlength = 32
} else {
bitlength = 128
}
return insertNumIntoIP(ip, bigNum, bitlength), nil
}
// AddressRange returns the first and last addresses in the given CIDR range.
func AddressRange(network *net.IPNet) (net.IP, net.IP) {
// the first IP is easy
firstIP := network.IP
// the last IP is the network address OR NOT the mask address
prefixLen, bits := network.Mask.Size()
if prefixLen == bits {
// Easy!
// But make sure that our two slices are distinct, since they
// would be in all other cases.
lastIP := make([]byte, len(firstIP))
copy(lastIP, firstIP)
return firstIP, lastIP
}
firstIPInt, bits := ipToInt(firstIP)
hostLen := uint(bits) - uint(prefixLen)
lastIPInt := big.NewInt(1)
lastIPInt.Lsh(lastIPInt, hostLen)
lastIPInt.Sub(lastIPInt, big.NewInt(1))
lastIPInt.Or(lastIPInt, firstIPInt)
return firstIP, intToIP(lastIPInt, bits)
}
// AddressCount returns the number of distinct host addresses within the given
// CIDR range.
//
// Since the result is a uint64, this function returns meaningful information
// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65.
func AddressCount(network *net.IPNet) uint64 {
prefixLen, bits := network.Mask.Size()
return 1 << (uint64(bits) - uint64(prefixLen))
}
// VerifyNoOverlap takes a list of subnets and a supernet (CIDRBlock) and verifies
// that none of the subnets overlap and that all subnets are contained in the
// supernet. It returns an error if either of those conditions is not satisfied.
func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error {
firstLastIP := make([][]net.IP, len(subnets))
for i, s := range subnets {
first, last := AddressRange(s)
firstLastIP[i] = []net.IP{first, last}
}
for i, s := range subnets {
if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) {
return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String())
}
for j := 0; j < len(subnets); j++ {
if i == j {
continue
}
first := firstLastIP[j][0]
last := firstLastIP[j][1]
if s.Contains(first) || s.Contains(last) {
return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String())
}
}
}
return nil
}
// PreviousSubnet returns the subnet of the desired mask in the IP space
// just lower than the start of the IPNet provided. If the IP space rolls over
// then the second return value is true.
func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
startIP := checkIPv4(network.IP)
previousIP := make(net.IP, len(startIP))
copy(previousIP, startIP)
cMask := net.CIDRMask(prefixLen, 8*len(previousIP))
previousIP = Dec(previousIP)
previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask}
if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) {
return previous, true
}
return previous, false
}
// NextSubnet returns the next available subnet of the desired mask size,
// starting from the maximum IP of the offset subnet.
// If the IP exceeds the maximum IP then the second return value is true.
func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
_, currentLast := AddressRange(network)
mask := net.CIDRMask(prefixLen, 8*len(currentLast))
currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask}
_, last := AddressRange(currentSubnet)
last = Inc(last)
next := &net.IPNet{IP: last.Mask(mask), Mask: mask}
if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) {
return next, true
}
return next, false
}
// Inc increases the IP by one; it returns a new []byte for the IP.
func Inc(IP net.IP) net.IP {
IP = checkIPv4(IP)
incIP := make([]byte, len(IP))
copy(incIP, IP)
for j := len(incIP) - 1; j >= 0; j-- {
incIP[j]++
if incIP[j] > 0 {
break
}
}
return incIP
}
// Dec decreases the IP by one; it returns a new []byte for the IP.
func Dec(IP net.IP) net.IP {
IP = checkIPv4(IP)
decIP := make([]byte, len(IP))
copy(decIP, IP)
decIP = checkIPv4(decIP)
for j := len(decIP) - 1; j >= 0; j-- {
decIP[j]--
if decIP[j] < 255 {
break
}
}
return decIP
}
func checkIPv4(ip net.IP) net.IP {
// Go for some reason allocs IPv6len for IPv4 so we have to correct it
if v4 := ip.To4(); v4 != nil {
return v4
}
return ip
}
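// Illustrative usage sketch of the functions above, assuming the upstream
// import path github.com/apparentlymart/go-cidr/cidr for this vendored package.
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	_, base, err := net.ParseCIDR("10.3.0.0/16")
	if err != nil {
		log.Fatal(err)
	}

	sub, err := cidr.Subnet(base, 8, 5) // 10.3.5.0/24
	if err != nil {
		log.Fatal(err)
	}
	host, err := cidr.Host(sub, 2) // 10.3.5.2
	if err != nil {
		log.Fatal(err)
	}

	first, last := cidr.AddressRange(sub) // 10.3.5.0 and 10.3.5.255
	fmt.Println(sub, host, first, last)
	fmt.Println(cidr.AddressCount(sub)) // 256
}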

@ -1,37 +0,0 @@
package cidr
import (
"fmt"
"math/big"
"net"
)
func ipToInt(ip net.IP) (*big.Int, int) {
val := &big.Int{}
val.SetBytes([]byte(ip))
if len(ip) == net.IPv4len {
return val, 32
} else if len(ip) == net.IPv6len {
return val, 128
} else {
panic(fmt.Errorf("Unsupported address length %d", len(ip)))
}
}
func intToIP(ipInt *big.Int, bits int) net.IP {
ipBytes := ipInt.Bytes()
ret := make([]byte, bits/8)
// Pack our IP bytes into the end of the return array,
// since big.Int.Bytes() removes front zero padding.
for i := 1; i <= len(ipBytes); i++ {
ret[len(ret)-i] = ipBytes[len(ipBytes)-i]
}
return net.IP(ret)
}
func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP {
ipInt, totalBits := ipToInt(ip)
bigNum.Lsh(bigNum, uint(totalBits-prefixLen))
ipInt.Or(ipInt, bigNum)
return intToIP(ipInt, totalBits)
}
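// A sketch of how these unexported helpers compose (usable only from within
// package cidr, so shown here as a comment):
//
//	ip := net.ParseIP("10.3.0.0").To4()
//	val, bits := ipToInt(ip)                      // val = 0x0A030000, bits = 32
//	same := intToIP(val, bits)                    // "10.3.0.0" again
//	sub := insertNumIntoIP(ip, big.NewInt(5), 24) // "10.3.5.0"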

@ -1,95 +0,0 @@
Copyright (c) 2017 Martin Atkins
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---------
Unicode table generation programs are under a separate copyright and license:
Copyright (c) 2014 Couchbase, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions
and limitations under the License.
---------
Grapheme break data is provided as part of the Unicode character database,
copright 2016 Unicode, Inc, which is provided with the following license:
Unicode Data Files include all data files under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.
Unicode Data Files do not include PDF online code charts under the
directory http://www.unicode.org/Public/.
Software includes any source code published in the Unicode Standard
or under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.
NOTICE TO USER: Carefully read the following legal agreement.
BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
TERMS AND CONDITIONS OF THIS AGREEMENT.
IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
THE DATA FILES OR SOFTWARE.
COPYRIGHT AND PERMISSION NOTICE
Copyright © 1991-2017 Unicode, Inc. All rights reserved.
Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
Permission is hereby granted, free of charge, to any person obtaining
a copy of the Unicode data files and any associated documentation
(the "Data Files") or Unicode software and any associated documentation
(the "Software") to deal in the Data Files or Software
without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, and/or sell copies of
the Data Files or Software, and to permit persons to whom the Data Files
or Software are furnished to do so, provided that either
(a) this copyright and permission notice appear with all copies
of the Data Files or Software, or
(b) this copyright and permission notice appear in associated
Documentation.
THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THE DATA FILES OR SOFTWARE.
Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale,
use or other dealings in these Data Files or Software without prior
written authorization of the copyright holder.

@ -1,30 +0,0 @@
package textseg
import (
"bufio"
"bytes"
)
// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of
// all of the recognized tokens in the given buffer.
func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) {
scanner := bufio.NewScanner(bytes.NewReader(buf))
scanner.Split(splitFunc)
var ret [][]byte
for scanner.Scan() {
ret = append(ret, scanner.Bytes())
}
return ret, scanner.Err()
}
// TokenCount is a utility that uses a bufio.SplitFunc to count the number of
// recognized tokens in the given buffer.
func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) {
scanner := bufio.NewScanner(bytes.NewReader(buf))
scanner.Split(splitFunc)
var ret int
for scanner.Scan() {
ret++
}
return ret, scanner.Err()
}
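// Illustrative usage sketch, assuming the upstream import path
// github.com/apparentlymart/go-textseg/textseg for this vendored package.
package main

import (
	"fmt"
	"log"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	input := []byte("cafe\u0301") // "café" spelled with a combining acute accent

	tokens, err := textseg.AllTokens(input, textseg.ScanGraphemeClusters)
	if err != nil {
		log.Fatal(err)
	}
	for _, tok := range tokens {
		fmt.Printf("%q ", tok) // "c" "a" "f" "é" (the last cluster is two runes)
	}
	fmt.Println()

	n, err := textseg.TokenCount(input, textseg.ScanGraphemeClusters)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("grapheme clusters:", n) // 4, even though len(input) is 6 bytes
}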

@ -1,7 +0,0 @@
package textseg
//go:generate go run make_tables.go -output tables.go
//go:generate go run make_test_tables.go -output tables_test.go
//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl
//go:generate ragel -Z grapheme_clusters.rl
//go:generate gofmt -w grapheme_clusters.go

File diff suppressed because it is too large

@ -1,132 +0,0 @@
package textseg
import (
"errors"
"unicode/utf8"
)
// Generated from grapheme_clusters.rl. DO NOT EDIT
%%{
# (except you are actually in grapheme_clusters.rl here, so edit away!)
machine graphclust;
write data;
}%%
var Error = errors.New("invalid UTF8 text")
// ScanGraphemeClusters is a split function for bufio.Scanner that splits
// on grapheme cluster boundaries.
func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) {
if len(data) == 0 {
return 0, nil, nil
}
// Ragel state
cs := 0 // Current State
p := 0 // "Pointer" into data
pe := len(data) // End-of-data "pointer"
ts := 0
te := 0
act := 0
eof := pe
// Make Go compiler happy
_ = ts
_ = te
_ = act
_ = eof
startPos := 0
endPos := 0
%%{
include GraphemeCluster "grapheme_clusters_table.rl";
action start {
startPos = p
}
action end {
endPos = p
}
action emit {
return endPos+1, data[startPos:endPos+1], nil
}
ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?;
AnyExtender = Extend | ZWJGlue | SpacingMark;
Extension = AnyExtender*;
ReplacementChar = (0xEF 0xBF 0xBD);
CRLFSeq = CR LF;
ControlSeq = Control | ReplacementChar;
HangulSeq = (
L+ (((LV? V+ | LVT) T*)?|LV?) |
LV V* T* |
V+ T* |
LVT T* |
T+
) Extension;
EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension;
ZWJSeq = ZWJGlue Extension;
EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension;
UTF8Cont = 0x80 .. 0xBF;
AnyUTF8 = (
0x00..0x7F |
0xC0..0xDF . UTF8Cont |
0xE0..0xEF . UTF8Cont . UTF8Cont |
0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
);
# OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension
OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension;
# PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break
PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?;
CRLFTok = CRLFSeq >start @end;
ControlTok = ControlSeq >start @end;
HangulTok = HangulSeq >start @end;
EmojiTok = EmojiSeq >start @end;
ZWJTok = ZWJSeq >start @end;
EmojiFlagTok = EmojiFlagSeq >start @end;
OtherTok = OtherSeq >start @end;
PrependTok = PrependSeq >start @end;
main := |*
CRLFTok => emit;
ControlTok => emit;
HangulTok => emit;
EmojiTok => emit;
ZWJTok => emit;
EmojiFlagTok => emit;
PrependTok => emit;
OtherTok => emit;
# any single valid UTF-8 character would also be valid per spec,
# but we'll handle that separately after the loop so we can deal
# with requesting more bytes if we're not at EOF.
*|;
write init;
write exec;
}%%
// If we fall out here then we were unable to complete a sequence.
// If we weren't able to complete a sequence then either we've
// reached the end of a partial buffer (so there's more data to come)
// or we have an isolated symbol that would normally be part of a
// grapheme cluster but has appeared in isolation here.
if !atEOF {
// Request more
return 0, nil, nil
}
// Just take the first UTF-8 sequence and return that.
_, seqLen := utf8.DecodeRune(data)
return seqLen, data[:seqLen], nil
}

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,335 +0,0 @@
#!/usr/bin/env ruby
#
# This script has been updated to accept more command-line arguments:
#
# -u, --url URL to process
# -m, --machine Machine name
# -p, --properties Properties to add to the machine
# -o, --output Write output to file
#
# Updated by: Marty Schoch <marty.schoch@gmail.com>
#
# This script uses the unicode spec to generate a Ragel state machine
# that recognizes unicode alphanumeric characters. It generates 5
# character classes: uupper, ulower, ualpha, udigit, and ualnum.
# Currently supported encodings are UTF-8 [default] and UCS-4.
#
# Usage: unicode2ragel.rb [options]
# -e, --encoding [ucs4 | utf8] Data encoding
# -h, --help Show this message
#
# This script was originally written as part of the Ferret search
# engine library.
#
# Author: Rakan El-Khalil <rakan@well.com>
require 'optparse'
require 'open-uri'
ENCODINGS = [ :utf8, :ucs4 ]
ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
DEFAULT_MACHINE_NAME= "WChar"
###
# Display vars & default option
TOTAL_WIDTH = 80
RANGE_WIDTH = 23
@encoding = :utf8
@chart_url = DEFAULT_CHART_URL
machine_name = DEFAULT_MACHINE_NAME
properties = []
@output = $stdout
###
# Option parsing
cli_opts = OptionParser.new do |opts|
opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
@encoding = o.downcase.to_sym
end
opts.on("-h", "--help", "Show this message") do
puts opts
exit
end
opts.on("-u", "--url URL", "URL to process") do |o|
@chart_url = o
end
opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
machine_name = o
end
opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
properties = o
end
opts.on("-o", "--output FILE", "output file") do |o|
@output = File.new(o, "w+")
end
end
cli_opts.parse(ARGV)
unless ENCODINGS.member? @encoding
puts "Invalid encoding: #{@encoding}"
puts cli_opts
exit
end
##
# Downloads the document at url and yields every alpha line's hex
# range and description.
def each_alpha( url, property )
open( url ) do |file|
file.each_line do |line|
next if line =~ /^#/;
next if line !~ /; #{property} #/;
range, description = line.split(/;/)
range.strip!
description.gsub!(/.*#/, '').strip!
if range =~ /\.\./
start, stop = range.split '..'
else start = stop = range
end
yield start.hex .. stop.hex, description
end
end
end
###
# Formats to hex at minimum width
def to_hex( n )
r = "%0X" % n
r = "0#{r}" unless (r.length % 2).zero?
r
end
###
# UCS4 is just a straight hex conversion of the unicode codepoint.
def to_ucs4( range )
rangestr = "0x" + to_hex(range.begin)
rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
[ rangestr ]
end
##
# 0x00 - 0x7f -> 0zzzzzzz[7]
# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
def to_utf8_enc( n )
r = 0
if n <= 0x7f
r = n
elsif n <= 0x7ff
y = 0xc0 | (n >> 6)
z = 0x80 | (n & 0x3f)
r = y << 8 | z
elsif n <= 0xffff
x = 0xe0 | (n >> 12)
y = 0x80 | (n >> 6) & 0x3f
z = 0x80 | n & 0x3f
r = x << 16 | y << 8 | z
elsif n <= 0x10ffff
w = 0xf0 | (n >> 18)
x = 0x80 | (n >> 12) & 0x3f
y = 0x80 | (n >> 6) & 0x3f
z = 0x80 | n & 0x3f
r = w << 24 | x << 16 | y << 8 | z
end
to_hex(r)
end
def from_utf8_enc( n )
n = n.hex
r = 0
if n <= 0x7f
r = n
elsif n <= 0xdfff
y = (n >> 8) & 0x1f
z = n & 0x3f
r = y << 6 | z
elsif n <= 0xefffff
x = (n >> 16) & 0x0f
y = (n >> 8) & 0x3f
z = n & 0x3f
r = x << 10 | y << 6 | z
elsif n <= 0xf7ffffff
w = (n >> 24) & 0x07
x = (n >> 16) & 0x3f
y = (n >> 8) & 0x3f
z = n & 0x3f
r = w << 18 | x << 12 | y << 6 | z
end
r
end
###
# Given a range, splits it up into ranges that can be continuously
# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
# This is not strictly needed since the current [5.1] unicode standard
# doesn't have ranges that straddle utf8 boundaries. This is included
# for completeness as there is no telling if that will ever change.
def utf8_ranges( range )
ranges = []
UTF8_BOUNDARIES.each do |max|
if range.begin <= max
if range.end <= max
ranges << range
return ranges
end
ranges << (range.begin .. max)
range = (max + 1) .. range.end
end
end
ranges
end
def build_range( start, stop )
size = start.size/2
left = size - 1
return [""] if size < 1
a = start[0..1]
b = stop[0..1]
###
# Shared prefix
if a == b
return build_range(start[2..-1], stop[2..-1]).map do |elt|
"0x#{a} " + elt
end
end
###
# Unshared prefix, end of run
return ["0x#{a}..0x#{b} "] if left.zero?
###
# Unshared prefix, not end of run
# Range can be 0x123456..0x56789A
# Which is equivalent to:
# 0x123456 .. 0x12FFFF
# 0x130000 .. 0x55FFFF
# 0x560000 .. 0x56789A
ret = []
ret << build_range(start, a + "FF" * left)
###
# Only generate middle range if need be.
if a.hex+1 != b.hex
max = to_hex(b.hex - 1)
max = "FF" if b == "FF"
ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
end
###
# Don't generate last range if it is covered by first range
ret << build_range(b + "00" * left, stop) unless b == "FF"
ret.flatten!
end
def to_utf8( range )
utf8_ranges( range ).map do |r|
begin_enc = to_utf8_enc(r.begin)
end_enc = to_utf8_enc(r.end)
build_range begin_enc, end_enc
end.flatten!
end
##
# Perform a 3-way comparison of the number of codepoints advertised by
# the unicode spec for the given range, the originally parsed range,
# and the resulting utf8 encoded range.
def count_codepoints( code )
code.split(' ').inject(1) do |acc, elt|
if elt =~ /0x(.+)\.\.0x(.+)/
if @encoding == :utf8
acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
else
acc * ($2.hex - $1.hex + 1)
end
else
acc
end
end
end
def is_valid?( range, desc, codes )
spec_count = 1
spec_count = $1.to_i if desc =~ /\[(\d+)\]/
range_count = range.end - range.begin + 1
sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
sum == spec_count and sum == range_count
end
##
# Generate the state machine to stdout
def generate_machine( name, property )
pipe = " "
@output.puts " #{name} = "
each_alpha( @chart_url, property ) do |range, desc|
codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
#raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
# is_valid? range, desc, codes
range_width = codes.map { |a| a.size }.max
range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11
desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
if desc.size > desc_width
desc = desc[0..desc_width - 4] + "..."
end
codes.each_with_index do |r, idx|
desc = "" unless idx.zero?
code = "%-#{range_width}s" % r
@output.puts " #{pipe} #{code} ##{desc}"
pipe = "|"
end
end
@output.puts " ;"
@output.puts ""
end
@output.puts <<EOF
# The following Ragel file was autogenerated with #{$0}
# from: #{@chart_url}
#
# It defines #{properties}.
#
# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
# and that your input is in #{@encoding}.
%%{
machine #{machine_name};
EOF
properties.each { |x| generate_machine( x, x ) }
@output.puts <<EOF
}%%
EOF

@ -1,19 +0,0 @@
package textseg
import "unicode/utf8"
// ScanUTF8Sequences is a split function for bufio.Scanner that splits
// on UTF8 sequence boundaries.
//
// This is included largely for completeness, since this behavior is already
// built in to Go when ranging over a string.
func ScanUTF8Sequences(data []byte, atEOF bool) (int, []byte, error) {
if len(data) == 0 {
return 0, nil, nil
}
r, seqLen := utf8.DecodeRune(data)
if r == utf8.RuneError && !atEOF {
return 0, nil, nil
}
return seqLen, data[:seqLen], nil
}
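// Illustrative usage sketch: ScanUTF8Sequences with bufio.Scanner yields one
// token per encoded rune. Assumes the upstream import path
// github.com/apparentlymart/go-textseg/textseg.
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"log"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	sc := bufio.NewScanner(bytes.NewReader([]byte("héllo")))
	sc.Split(textseg.ScanUTF8Sequences)
	for sc.Scan() {
		fmt.Printf("%q ", sc.Text()) // "h" "é" "l" "l" "o"
	}
	fmt.Println()
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}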

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -1,3 +0,0 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.

@ -1,164 +0,0 @@
// Package awserr represents API error interface accessors for the SDK.
package awserr
// An Error wraps lower level errors with code, message and an original error.
// The underlying concrete error type may also satisfy other interfaces which
// can be used to obtain more specific information about the error.
//
// Calling Error() or String() will always include the full information about
// an error based on its underlying type.
//
// Example:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if awsErr, ok := err.(awserr.Error); ok {
// // Get error details
// log.Println("Error:", awsErr.Code(), awsErr.Message())
//
// // Prints out full error message, including original error if there was one.
// log.Println("Error:", awsErr.Error())
//
// // Get original error
// if origErr := awsErr.OrigErr(); origErr != nil {
// // operate on original error.
// }
// } else {
// fmt.Println(err.Error())
// }
// }
//
type Error interface {
// Satisfy the generic error interface.
error
// Returns the short phrase depicting the classification of the error.
Code() string
// Returns the error details message.
Message() string
// Returns the original error if one was set. Nil is returned if not set.
OrigErr() error
}
// BatchError is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
// compatibility.
type BatchError interface {
// Satisfy the generic error interface.
error
// Returns the short phrase depicting the classification of the error.
Code() string
// Returns the error details message.
Message() string
// Returns the original error if one was set. Nil is returned if not set.
OrigErrs() []error
}
// BatchedErrors is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Replaces BatchError
type BatchedErrors interface {
// Satisfy the base Error interface.
Error
// Returns the original error if one was set. Nil is returned if not set.
OrigErrs() []error
}
// New returns an Error object described by the code, message, and origErr.
//
// If origErr satisfies the Error interface it will not be wrapped within a new
// Error object and will instead be returned.
func New(code, message string, origErr error) Error {
var errs []error
if origErr != nil {
errs = append(errs, origErr)
}
return newBaseError(code, message, errs)
}
// NewBatchError returns a BatchedErrors wrapping the given collection of
// errors.
func NewBatchError(code, message string, errs []error) BatchedErrors {
return newBaseError(code, message, errs)
}
// A RequestFailure is an interface to extract request failure information from
// an Error such as the request ID of the failed request returned by a service.
// RequestFailures may not always have a requestID value if the request failed
// prior to reaching the service, such as on a connection error.
//
// Example:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if reqerr, ok := err.(RequestFailure); ok {
// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
// } else {
// log.Println("Error:", err.Error())
// }
// }
//
// Combined with awserr.Error:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if awsErr, ok := err.(awserr.Error); ok {
// // Generic AWS Error with Code, Message, and original error (if any)
// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
//
// if reqErr, ok := err.(awserr.RequestFailure); ok {
// // A service error occurred
// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
// }
// } else {
// fmt.Println(err.Error())
// }
// }
//
type RequestFailure interface {
Error
// The status code of the HTTP response.
StatusCode() int
// The request ID returned by the service for a request failure. This will
// be empty if no request ID is available, such as when the request failed
// due to a connection error.
RequestID() string
}
// NewRequestFailure returns a wrapped error with additional information for
// request status code, and service requestID.
//
// Should be used to wrap all requests which involve service requests, even if
// the request failed without a service response but had an HTTP status code
// that may be meaningful.
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
return newRequestError(err, statusCode, reqID)
}
// UnmarshalError provides the interface for the SDK failing to unmarshal data.
type UnmarshalError interface {
awsError
Bytes() []byte
}
// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
// the bytes that fail to unmarshal to the error.
func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError {
return &unmarshalError{
awsError: New("UnmarshalError", msg, err),
bytes: bytes,
}
}
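// Illustrative usage sketch of the constructors and accessors above, assuming
// the standard import path github.com/aws/aws-sdk-go/aws/awserr.
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	base := awserr.New("NoSuchBucket", "the specified bucket does not exist",
		errors.New("underlying transport failure"))
	var err error = awserr.NewRequestFailure(base, 404, "REQ-123")

	if aerr, ok := err.(awserr.Error); ok {
		fmt.Println("code:", aerr.Code(), "message:", aerr.Message())
		fmt.Println("original:", aerr.OrigErr())
	}
	if rf, ok := err.(awserr.RequestFailure); ok {
		fmt.Println("status:", rf.StatusCode(), "request id:", rf.RequestID())
	}
	fmt.Println(err.Error()) // includes code, message, status, and cause
}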

@ -1,221 +0,0 @@
package awserr
import (
"encoding/hex"
"fmt"
)
// SprintError returns a string of the formatted error code.
//
// Both extra and origErr are optional. If they are included their lines
// will be added, but if they are not included their lines will be ignored.
func SprintError(code, message, extra string, origErr error) string {
msg := fmt.Sprintf("%s: %s", code, message)
if extra != "" {
msg = fmt.Sprintf("%s\n\t%s", msg, extra)
}
if origErr != nil {
msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
}
return msg
}
// A baseError wraps the code and message which defines an error. It also
// can be used to wrap an original error object.
//
// Should be used as the root for errors satisfying the awserr.Error. Also
// for any error which does not fit into a specific error wrapper type.
type baseError struct {
// Classification of error
code string
// Detailed information about error
message string
// Optional original error this error is based off of. Allows building
// chained errors.
errs []error
}
// newBaseError returns an error object for the code, message, and errors.
//
// code is a short no whitespace phrase depicting the classification of
// the error that is being created.
//
// message is the free flow string containing detailed information about the
// error.
//
// origErrs are the error objects which will be nested under the new error to
// be returned.
func newBaseError(code, message string, origErrs []error) *baseError {
b := &baseError{
code: code,
message: message,
errs: origErrs,
}
return b
}
// Error returns the string representation of the error.
//
// See ErrorWithExtra for formatting.
//
// Satisfies the error interface.
func (b baseError) Error() string {
size := len(b.errs)
if size > 0 {
return SprintError(b.code, b.message, "", errorList(b.errs))
}
return SprintError(b.code, b.message, "", nil)
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (b baseError) String() string {
return b.Error()
}
// Code returns the short phrase depicting the classification of the error.
func (b baseError) Code() string {
return b.code
}
// Message returns the error details message.
func (b baseError) Message() string {
return b.message
}
// OrigErr returns the original error if one was set. Nil is returned if no
// error was set. This only returns the first element in the list. If the full
// list is needed, use BatchedErrors.
func (b baseError) OrigErr() error {
switch len(b.errs) {
case 0:
return nil
case 1:
return b.errs[0]
default:
if err, ok := b.errs[0].(Error); ok {
return NewBatchError(err.Code(), err.Message(), b.errs[1:])
}
return NewBatchError("BatchedErrors",
"multiple errors occurred", b.errs)
}
}
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (b baseError) OrigErrs() []error {
return b.errs
}
// So that the Error interface type can be included as an anonymous field
// in the requestError struct and not conflict with the error.Error() method.
type awsError Error
// A requestError wraps a request or service error.
//
// Composed of baseError for code, message, and original error.
type requestError struct {
awsError
statusCode int
requestID string
bytes []byte
}
// newRequestError returns a wrapped error with additional information for
// request status code, and service requestID.
//
// Should be used to wrap all requests which involve service requests, even if
// the request failed without a service response but had an HTTP status code
// that may be meaningful.
//
// Also wraps original errors via the baseError.
func newRequestError(err Error, statusCode int, requestID string) *requestError {
return &requestError{
awsError: err,
statusCode: statusCode,
requestID: requestID,
}
}
// Error returns the string representation of the error.
// Satisfies the error interface.
func (r requestError) Error() string {
extra := fmt.Sprintf("status code: %d, request id: %s",
r.statusCode, r.requestID)
return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (r requestError) String() string {
return r.Error()
}
// StatusCode returns the wrapped status code for the error
func (r requestError) StatusCode() int {
return r.statusCode
}
// RequestID returns the wrapped requestID
func (r requestError) RequestID() string {
return r.requestID
}
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (r requestError) OrigErrs() []error {
if b, ok := r.awsError.(BatchedErrors); ok {
return b.OrigErrs()
}
return []error{r.OrigErr()}
}
type unmarshalError struct {
awsError
bytes []byte
}
// Error returns the string representation of the error.
// Satisfies the error interface.
func (e unmarshalError) Error() string {
extra := hex.Dump(e.bytes)
return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (e unmarshalError) String() string {
return e.Error()
}
// Bytes returns the bytes that failed to unmarshal.
func (e unmarshalError) Bytes() []byte {
return e.bytes
}
// An error list that satisfies Go's error interface
type errorList []error
// Error returns the string representation of the error.
//
// Satisfies the error interface.
func (e errorList) Error() string {
msg := ""
// How do we want to handle the array size being zero
if size := len(e); size > 0 {
for i := 0; i < size; i++ {
msg += e[i].Error()
// We check the next index to see if it is within the slice.
// If it is, then we append a newline. We do this because unit tests
// could be broken by the additional '\n'.
if i+1 < size {
msg += "\n"
}
}
}
return msg
}

@ -1,108 +0,0 @@
package awsutil
import (
"io"
"reflect"
"time"
)
// Copy deeply copies a src structure to dst. Useful for copying request and
// response structures.
//
// Can copy between structs of different type, but will only copy fields which
// are assignable, and exist in both structs. Fields which are not assignable,
// or do not exist in both structs are ignored.
func Copy(dst, src interface{}) {
dstval := reflect.ValueOf(dst)
if !dstval.IsValid() {
panic("Copy dst cannot be nil")
}
rcopy(dstval, reflect.ValueOf(src), true)
}
// CopyOf returns a copy of src while also allocating the memory for dst.
// src must be a pointer type or this operation will fail.
func CopyOf(src interface{}) (dst interface{}) {
dsti := reflect.New(reflect.TypeOf(src).Elem())
dst = dsti.Interface()
rcopy(dsti, reflect.ValueOf(src), true)
return
}
// rcopy performs a recursive copy of values from the source to destination.
//
// root is used to skip certain aspects of the copy which are not valid
// for the root node of an object.
func rcopy(dst, src reflect.Value, root bool) {
if !src.IsValid() {
return
}
switch src.Kind() {
case reflect.Ptr:
if _, ok := src.Interface().(io.Reader); ok {
if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
dst.Elem().Set(src)
} else if dst.CanSet() {
dst.Set(src)
}
} else {
e := src.Type().Elem()
if dst.CanSet() && !src.IsNil() {
if _, ok := src.Interface().(*time.Time); !ok {
dst.Set(reflect.New(e))
} else {
tempValue := reflect.New(e)
tempValue.Elem().Set(src.Elem())
// Sets time.Time's unexported values
dst.Set(tempValue)
}
}
if src.Elem().IsValid() {
// Keep the current root state since the depth hasn't changed
rcopy(dst.Elem(), src.Elem(), root)
}
}
case reflect.Struct:
t := dst.Type()
for i := 0; i < t.NumField(); i++ {
name := t.Field(i).Name
srcVal := src.FieldByName(name)
dstVal := dst.FieldByName(name)
if srcVal.IsValid() && dstVal.CanSet() {
rcopy(dstVal, srcVal, false)
}
}
case reflect.Slice:
if src.IsNil() {
break
}
s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
dst.Set(s)
for i := 0; i < src.Len(); i++ {
rcopy(dst.Index(i), src.Index(i), false)
}
case reflect.Map:
if src.IsNil() {
break
}
s := reflect.MakeMap(src.Type())
dst.Set(s)
for _, k := range src.MapKeys() {
v := src.MapIndex(k)
v2 := reflect.New(v.Type()).Elem()
rcopy(v2, v, false)
dst.SetMapIndex(k, v2)
}
default:
// Assign the value if possible. If it's not assignable, the value would
// need to be converted and the impact of that may be unexpected, or is
// not compatible with the dst type.
if src.Type().AssignableTo(dst.Type()) {
dst.Set(src)
}
}
}
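// Illustrative usage sketch of Copy and CopyOf, assuming the standard import
// path github.com/aws/aws-sdk-go/aws/awsutil and a throwaway widget type.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type widget struct {
	Name *string
	Tags map[string]string
}

func main() {
	name := "original"
	src := &widget{Name: &name, Tags: map[string]string{"env": "dev"}}

	var dst widget
	awsutil.Copy(&dst, src)

	*dst.Name = "copy"       // pointers were reallocated, so src is untouched
	dst.Tags["env"] = "prod" // the map was copied too
	fmt.Println(*src.Name, src.Tags["env"]) // original dev

	clone := awsutil.CopyOf(src).(*widget) // CopyOf allocates the destination
	fmt.Println(*clone.Name)               // original
}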

@ -1,27 +0,0 @@
package awsutil
import (
"reflect"
)
// DeepEqual returns whether the two values are deeply equal, like reflect.DeepEqual.
// In addition to this, this method will also dereference the input values if
// possible so the DeepEqual performed will not fail if one parameter is a
// pointer and the other is not.
//
// DeepEqual will not perform indirection of nested values of the input parameters.
func DeepEqual(a, b interface{}) bool {
ra := reflect.Indirect(reflect.ValueOf(a))
rb := reflect.Indirect(reflect.ValueOf(b))
if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
// If the elements are both nil, and of the same type they are equal
// If they are of different types they are not equal
return reflect.TypeOf(a) == reflect.TypeOf(b)
} else if raValid != rbValid {
// Both values must be valid to be equal
return false
}
return reflect.DeepEqual(ra.Interface(), rb.Interface())
}
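// Illustrative sketch: DeepEqual dereferences its top-level arguments, so a
// value and a pointer to an equal value compare as equal. Assumes the standard
// import path github.com/aws/aws-sdk-go/aws/awsutil.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
	s := "hello"
	fmt.Println(awsutil.DeepEqual(s, &s))   // true: &s is dereferenced first
	fmt.Println(awsutil.DeepEqual(&s, "x")) // false: underlying values differ

	var p *string
	fmt.Println(awsutil.DeepEqual(p, nil)) // false: (*string)(nil) and untyped nil differ
}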

@ -1,221 +0,0 @@
package awsutil
import (
"reflect"
"regexp"
"strconv"
"strings"
"github.com/jmespath/go-jmespath"
)
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
// rValuesAtPath returns a slice of values found in value v. The values
// in v are explored recursively so all nested values are collected.
func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
pathparts := strings.Split(path, "||")
if len(pathparts) > 1 {
for _, pathpart := range pathparts {
vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
if len(vals) > 0 {
return vals
}
}
return nil
}
values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
components := strings.Split(path, ".")
for len(values) > 0 && len(components) > 0 {
var index *int64
var indexStar bool
c := strings.TrimSpace(components[0])
if c == "" { // no actual component, illegal syntax
return nil
} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
// TODO normalize case for user
return nil // don't support unexported fields
}
// parse this component
if m := indexRe.FindStringSubmatch(c); m != nil {
c = m[1]
if m[2] == "" {
index = nil
indexStar = true
} else {
i, _ := strconv.ParseInt(m[2], 10, 32)
index = &i
indexStar = false
}
}
nextvals := []reflect.Value{}
for _, value := range values {
// pull component name out of struct member
if value.Kind() != reflect.Struct {
continue
}
if c == "*" { // pull all members
for i := 0; i < value.NumField(); i++ {
if f := reflect.Indirect(value.Field(i)); f.IsValid() {
nextvals = append(nextvals, f)
}
}
continue
}
value = value.FieldByNameFunc(func(name string) bool {
if c == name {
return true
} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
return true
}
return false
})
if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
if !value.IsNil() {
value.Set(reflect.Zero(value.Type()))
}
return []reflect.Value{value}
}
if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
// TODO if the value is the terminus it should not be created
// if the value to be set to its position is nil.
value.Set(reflect.New(value.Type().Elem()))
value = value.Elem()
} else {
value = reflect.Indirect(value)
}
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
if !createPath && value.IsNil() {
value = reflect.ValueOf(nil)
}
}
if value.IsValid() {
nextvals = append(nextvals, value)
}
}
values = nextvals
if indexStar || index != nil {
nextvals = []reflect.Value{}
for _, valItem := range values {
value := reflect.Indirect(valItem)
if value.Kind() != reflect.Slice {
continue
}
if indexStar { // grab all indices
for i := 0; i < value.Len(); i++ {
idx := reflect.Indirect(value.Index(i))
if idx.IsValid() {
nextvals = append(nextvals, idx)
}
}
continue
}
// pull out index
i := int(*index)
if i >= value.Len() { // check out of bounds
if createPath {
// TODO resize slice
} else {
continue
}
} else if i < 0 { // support negative indexing
i = value.Len() + i
}
value = reflect.Indirect(value.Index(i))
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
if !createPath && value.IsNil() {
value = reflect.ValueOf(nil)
}
}
if value.IsValid() {
nextvals = append(nextvals, value)
}
}
values = nextvals
}
components = components[1:]
}
return values
}
// ValuesAtPath returns a list of values at the case insensitive lexical
// path inside of a structure.
func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
result, err := jmespath.Search(path, i)
if err != nil {
return nil, err
}
v := reflect.ValueOf(result)
if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
return nil, nil
}
if s, ok := result.([]interface{}); ok {
return s, err
}
if v.Kind() == reflect.Map && v.Len() == 0 {
return nil, nil
}
if v.Kind() == reflect.Slice {
out := make([]interface{}, v.Len())
for i := 0; i < v.Len(); i++ {
out[i] = v.Index(i).Interface()
}
return out, nil
}
return []interface{}{result}, nil
}
// SetValueAtPath sets a value at the case insensitive lexical path inside
// of a structure.
func SetValueAtPath(i interface{}, path string, v interface{}) {
rvals := rValuesAtPath(i, path, true, false, v == nil)
for _, rval := range rvals {
if rval.Kind() == reflect.Ptr && rval.IsNil() {
continue
}
setValue(rval, v)
}
}
func setValue(dstVal reflect.Value, src interface{}) {
if dstVal.Kind() == reflect.Ptr {
dstVal = reflect.Indirect(dstVal)
}
srcVal := reflect.ValueOf(src)
if !srcVal.IsValid() { // src is literal nil
if dstVal.CanAddr() {
// Convert to pointer so that pointer's value can be nil'ed
// dstVal = dstVal.Addr()
}
dstVal.Set(reflect.Zero(dstVal.Type()))
} else if srcVal.Kind() == reflect.Ptr {
if srcVal.IsNil() {
srcVal = reflect.Zero(dstVal.Type())
} else {
srcVal = reflect.ValueOf(src).Elem()
}
dstVal.Set(srcVal)
} else {
dstVal.Set(srcVal)
}
}
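// Illustrative sketch of SetValueAtPath: it walks the path with the reflection
// helper above, allocating nil pointers along the way. Assumes the standard
// import path github.com/aws/aws-sdk-go/aws/awsutil and throwaway types.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type inner struct {
	Name *string
}

type outer struct {
	Inner *inner
}

func main() {
	o := &outer{}
	// Field matching is case-insensitive, so "inner.name" would also work.
	awsutil.SetValueAtPath(o, "Inner.Name", "hello")
	fmt.Println(*o.Inner.Name) // hello
}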

@ -1,113 +0,0 @@
package awsutil
import (
"bytes"
"fmt"
"io"
"reflect"
"strings"
)
// Prettify returns the string representation of a value.
func Prettify(i interface{}) string {
var buf bytes.Buffer
prettify(reflect.ValueOf(i), 0, &buf)
return buf.String()
}
// prettify will recursively walk value v to build a textual
// representation of the value.
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Struct:
strtype := v.Type().String()
if strtype == "time.Time" {
fmt.Fprintf(buf, "%s", v.Interface())
break
} else if strings.HasPrefix(strtype, "io.") {
buf.WriteString("<buffer>")
break
}
buf.WriteString("{\n")
names := []string{}
for i := 0; i < v.Type().NumField(); i++ {
name := v.Type().Field(i).Name
f := v.Field(i)
if name[0:1] == strings.ToLower(name[0:1]) {
continue // ignore unexported fields
}
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
continue // ignore unset fields
}
names = append(names, name)
}
for i, n := range names {
val := v.FieldByName(n)
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
prettify(val, indent+2, buf)
if i < len(names)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
case reflect.Slice:
strtype := v.Type().String()
if strtype == "[]uint8" {
fmt.Fprintf(buf, "<binary> len %d", v.Len())
break
}
nl, id, id2 := "", "", ""
if v.Len() > 3 {
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
}
buf.WriteString("[" + nl)
for i := 0; i < v.Len(); i++ {
buf.WriteString(id2)
prettify(v.Index(i), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString("," + nl)
}
}
buf.WriteString(nl + id + "]")
case reflect.Map:
buf.WriteString("{\n")
for i, k := range v.MapKeys() {
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(k.String() + ": ")
prettify(v.MapIndex(k), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
default:
if !v.IsValid() {
fmt.Fprint(buf, "<invalid value>")
return
}
format := "%v"
switch v.Interface().(type) {
case string:
format = "%q"
case io.ReadSeeker, io.Reader:
format = "buffer(%p)"
}
fmt.Fprintf(buf, format, v.Interface())
}
}
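// Example (illustrative sketch, not part of the original source): Prettify is
// handy for debug-printing API structs. The Widget type below is hypothetical.
//
//    type Widget struct {
//        Name  *string
//        Count *int
//    }
//
//    name, count := "spinner", 3
//    fmt.Println(Prettify(&Widget{Name: &name, Count: &count}))
//    // Prints an indented, struct-like rendering; nil pointer, slice, and map
//    // fields are omitted, and []uint8 values are summarized as <binary>.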

@ -1,88 +0,0 @@
package awsutil
import (
"bytes"
"fmt"
"reflect"
"strings"
)
// StringValue returns the string representation of a value.
func StringValue(i interface{}) string {
var buf bytes.Buffer
stringValue(reflect.ValueOf(i), 0, &buf)
return buf.String()
}
func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Struct:
buf.WriteString("{\n")
for i := 0; i < v.Type().NumField(); i++ {
ft := v.Type().Field(i)
fv := v.Field(i)
if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
continue // ignore unexported fields
}
if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
continue // ignore unset fields
}
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(ft.Name + ": ")
if tag := ft.Tag.Get("sensitive"); tag == "true" {
buf.WriteString("<sensitive>")
} else {
stringValue(fv, indent+2, buf)
}
buf.WriteString(",\n")
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
case reflect.Slice:
nl, id, id2 := "", "", ""
if v.Len() > 3 {
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
}
buf.WriteString("[" + nl)
for i := 0; i < v.Len(); i++ {
buf.WriteString(id2)
stringValue(v.Index(i), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString("," + nl)
}
}
buf.WriteString(nl + id + "]")
case reflect.Map:
buf.WriteString("{\n")
for i, k := range v.MapKeys() {
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(k.String() + ": ")
stringValue(v.MapIndex(k), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
default:
format := "%v"
switch v.Interface().(type) {
case string:
format = "%q"
}
fmt.Fprintf(buf, format, v.Interface())
}
}
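// Example (illustrative sketch, not part of the original source): unlike
// Prettify above, stringValue honors a `sensitive:"true"` struct tag and
// redacts the tagged field. The Credentials type below is hypothetical.
//
//    type Credentials struct {
//        User   *string
//        Secret *string `sensitive:"true"`
//    }
//
//    user, secret := "alice", "hunter2"
//    fmt.Println(StringValue(&Credentials{User: &user, Secret: &secret}))
//    // The Secret field is rendered as <sensitive> instead of its value.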

@ -1,96 +0,0 @@
package client
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
)
// A Config provides configuration to a service client instance.
type Config struct {
Config *aws.Config
Handlers request.Handlers
Endpoint string
SigningRegion string
SigningName string
// States that the signing name did not come from a modeled source but
// was derived based on other data. Used by service client constructors
// to determine if the signing name can be overridden based on metadata the
// service has.
SigningNameDerived bool
}
// ConfigProvider provides a generic way for a service client to receive
// the ClientConfig without circular dependencies.
type ConfigProvider interface {
ClientConfig(serviceName string, cfgs ...*aws.Config) Config
}
// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
// resolve the endpoint automatically. The service client's endpoint must be
// provided via the aws.Config.Endpoint field.
type ConfigNoResolveEndpointProvider interface {
ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
}
// A Client implements the base client request and response handling
// used by all service clients.
type Client struct {
request.Retryer
metadata.ClientInfo
Config aws.Config
Handlers request.Handlers
}
// New will return a pointer to a new initialized service client.
func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
svc := &Client{
Config: cfg,
ClientInfo: info,
Handlers: handlers.Copy(),
}
switch retryer, ok := cfg.Retryer.(request.Retryer); {
case ok:
svc.Retryer = retryer
case cfg.Retryer != nil && cfg.Logger != nil:
s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
cfg.Logger.Log(s)
fallthrough
default:
maxRetries := aws.IntValue(cfg.MaxRetries)
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
maxRetries = DefaultRetryerMaxNumRetries
}
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
}
svc.AddDebugHandlers()
for _, option := range options {
option(svc)
}
return svc
}
// NewRequest returns a new Request pointer for the service API
// operation and parameters.
func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
}
// AddDebugHandlers injects debug logging handlers into the service to log request
// debug information.
func (c *Client) AddDebugHandlers() {
if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
return
}
c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
}
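// Example (illustrative sketch, not part of the original source): a bare
// client can be constructed directly from a Config, ClientInfo, and Handlers.
// The defaults.Config and defaults.Handlers helpers are assumed to come from
// the aws/defaults package.
//
//    cfg := defaults.Config().WithRegion("us-west-2")
//    c := New(*cfg, metadata.ClientInfo{
//        ServiceName: "mock",
//        Endpoint:    "https://mock.example.com",
//        APIVersion:  "2020-01-01",
//    }, defaults.Handlers())
//
//    // Because cfg.Retryer and cfg.MaxRetries are unset, c.Retryer is a
//    // DefaultRetryer with DefaultRetryerMaxNumRetries retries.
//    _ = c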

@ -1,177 +0,0 @@
package client
import (
"math"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkrand"
)
// DefaultRetryer implements basic retry logic using exponential backoff for
// most services. If you want to implement custom retry logic, you can implement the
// request.Retryer interface.
//
type DefaultRetryer struct {
// NumMaxRetries is the maximum number of retries that will be performed.
// By default, this is zero.
NumMaxRetries int
// MinRetryDelay is the minimum retry delay after which retry will be performed.
// If not set, the value is 0ns.
MinRetryDelay time.Duration
// MinThrottleRetryDelay is the minimum retry delay when throttled.
// If not set, the value is 0ns.
MinThrottleDelay time.Duration
// MaxRetryDelay is the maximum retry delay that will be applied when retrying.
// If not set, the value is 0ns.
MaxRetryDelay time.Duration
// MaxThrottleDelay is the maximum retry delay when throttled.
// If not set, the value is 0ns.
MaxThrottleDelay time.Duration
}
const (
// DefaultRetryerMaxNumRetries sets maximum number of retries
DefaultRetryerMaxNumRetries = 3
// DefaultRetryerMinRetryDelay sets minimum retry delay
DefaultRetryerMinRetryDelay = 30 * time.Millisecond
// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
// DefaultRetryerMaxRetryDelay sets maximum retry delay
DefaultRetryerMaxRetryDelay = 300 * time.Second
// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
DefaultRetryerMaxThrottleDelay = 300 * time.Second
)
// MaxRetries returns the maximum number of retries the service will use to make
// an individual API request.
func (d DefaultRetryer) MaxRetries() int {
return d.NumMaxRetries
}
// setRetryerDefaults sets the default values of the retryer if not set
func (d *DefaultRetryer) setRetryerDefaults() {
if d.MinRetryDelay == 0 {
d.MinRetryDelay = DefaultRetryerMinRetryDelay
}
if d.MaxRetryDelay == 0 {
d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
}
if d.MinThrottleDelay == 0 {
d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
}
if d.MaxThrottleDelay == 0 {
d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
}
}
// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
// if the number of max retries is zero, no retries will be performed.
if d.NumMaxRetries == 0 {
return 0
}
// Sets default value for retryer members
d.setRetryerDefaults()
// minDelay is the minimum retryer delay
minDelay := d.MinRetryDelay
var initialDelay time.Duration
isThrottle := r.IsErrorThrottle()
if isThrottle {
if delay, ok := getRetryAfterDelay(r); ok {
initialDelay = delay
}
minDelay = d.MinThrottleDelay
}
retryCount := r.RetryCount
// maxDelay the maximum retryer delay
maxDelay := d.MaxRetryDelay
if isThrottle {
maxDelay = d.MaxThrottleDelay
}
var delay time.Duration
// Logic to cap the retry count based on the minDelay provided
actualRetryCount := int(math.Log2(float64(minDelay))) + 1
if actualRetryCount < 63-retryCount {
delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
if delay > maxDelay {
delay = getJitterDelay(maxDelay / 2)
}
} else {
delay = getJitterDelay(maxDelay / 2)
}
return delay + initialDelay
}
// getJitterDelay returns a jittered delay for retry
func getJitterDelay(duration time.Duration) time.Duration {
return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
}
// ShouldRetry returns true if the request should be retried.
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
// ShouldRetry returns false if the number of max retries is 0.
if d.NumMaxRetries == 0 {
return false
}
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable != nil {
return *r.Retryable
}
return r.IsErrorRetryable() || r.IsErrorThrottle()
}
// This will look in the Retry-After header, RFC 7231, for how long
// it will wait before attempting another request
func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
if !canUseRetryAfterHeader(r) {
return 0, false
}
delayStr := r.HTTPResponse.Header.Get("Retry-After")
if len(delayStr) == 0 {
return 0, false
}
delay, err := strconv.Atoi(delayStr)
if err != nil {
return 0, false
}
return time.Duration(delay) * time.Second, true
}
// Will look at the status code to see if the retry header pertains to
// the status code.
func canUseRetryAfterHeader(r *request.Request) bool {
switch r.HTTPResponse.StatusCode {
case 429:
case 503:
default:
return false
}
return true
}
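// Example (illustrative sketch, not part of the original source): tightening
// retry behavior by passing a configured DefaultRetryer through aws.Config;
// the session usage mirrors the examples in the aws package documentation.
//
//    retryer := DefaultRetryer{
//        NumMaxRetries:    5,
//        MinRetryDelay:    50 * time.Millisecond,
//        MaxRetryDelay:    2 * time.Second,
//        MinThrottleDelay: 500 * time.Millisecond,
//        MaxThrottleDelay: 10 * time.Second,
//    }
//
//    sess := session.Must(session.NewSession(&aws.Config{
//        Retryer: retryer,
//    }))
//
//    // Clients created from sess consult retryer's ShouldRetry and RetryRules.
//    _ = sess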

@ -1,194 +0,0 @@
package client
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http/httputil"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
const logReqMsg = `DEBUG: Request %s/%s Details:
---[ REQUEST POST-SIGN ]-----------------------------
%s
-----------------------------------------------------`
const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
---[ REQUEST DUMP ERROR ]-----------------------------
%s
------------------------------------------------------`
type logWriter struct {
// Logger is what we will use to log the payload of a response.
Logger aws.Logger
// buf stores the contents of what has been read
buf *bytes.Buffer
}
func (logger *logWriter) Write(b []byte) (int, error) {
return logger.buf.Write(b)
}
type teeReaderCloser struct {
// io.Reader will be a tee reader that is used during logging.
// This structure will read from a body and write the contents to a logger.
io.Reader
// Source is used just to close when we are done reading.
Source io.ReadCloser
}
func (reader *teeReaderCloser) Close() error {
return reader.Source.Close()
}
// LogHTTPRequestHandler is an SDK request handler to log the HTTP request sent
// to a service. Will include the HTTP request body if the LogLevel of the
// request matches LogDebugWithHTTPBody.
var LogHTTPRequestHandler = request.NamedHandler{
Name: "awssdk.client.LogRequest",
Fn: logRequest,
}
func logRequest(r *request.Request) {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
bodySeekable := aws.IsReaderSeekable(r.Body)
b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, err))
return
}
if logBody {
if !bodySeekable {
r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
}
// Reset the request body because dumpRequest will re-wrap the
// r.HTTPRequest's Body as a NoOpCloser and will not be reset after
// read by the HTTP client reader.
if err := r.Error; err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, err))
return
}
}
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}
// LogHTTPRequestHeaderHandler is an SDK request handler to log the HTTP request sent
// to a service. Will only log the HTTP request's headers. The request payload
// will not be read.
var LogHTTPRequestHeaderHandler = request.NamedHandler{
Name: "awssdk.client.LogRequestHeader",
Fn: logRequestHeader,
}
func logRequestHeader(r *request.Request) {
b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, err))
return
}
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}
const logRespMsg = `DEBUG: Response %s/%s Details:
---[ RESPONSE ]--------------------------------------
%s
-----------------------------------------------------`
const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
---[ RESPONSE DUMP ERROR ]-----------------------------
%s
-----------------------------------------------------`
// LogHTTPResponseHandler is an SDK request handler to log the HTTP response
// received from a service. Will include the HTTP response body if the LogLevel
// of the request matches LogDebugWithHTTPBody.
var LogHTTPResponseHandler = request.NamedHandler{
Name: "awssdk.client.LogResponse",
Fn: logResponse,
}
func logResponse(r *request.Request) {
lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
if r.HTTPResponse == nil {
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
return
}
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
if logBody {
r.HTTPResponse.Body = &teeReaderCloser{
Reader: io.TeeReader(r.HTTPResponse.Body, lw),
Source: r.HTTPResponse.Body,
}
}
handlerFn := func(req *request.Request) {
b, err := httputil.DumpResponse(req.HTTPResponse, false)
if err != nil {
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
req.ClientInfo.ServiceName, req.Operation.Name, err))
return
}
lw.Logger.Log(fmt.Sprintf(logRespMsg,
req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
if logBody {
b, err := ioutil.ReadAll(lw.buf)
if err != nil {
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
req.ClientInfo.ServiceName, req.Operation.Name, err))
return
}
lw.Logger.Log(string(b))
}
}
const handlerName = "awsdk.client.LogResponse.ResponseBody"
r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
Name: handlerName, Fn: handlerFn,
})
r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
Name: handlerName, Fn: handlerFn,
})
}
// LogHTTPResponseHeaderHandler is an SDK request handler to log the HTTP
// response received from a service. Will only log the HTTP response's headers.
// The response payload will not be read.
var LogHTTPResponseHeaderHandler = request.NamedHandler{
Name: "awssdk.client.LogResponseHeader",
Fn: logResponseHeader,
}
func logResponseHeader(r *request.Request) {
if r.Config.Logger == nil {
return
}
b, err := httputil.DumpResponse(r.HTTPResponse, false)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, err))
return
}
r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}
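// Example (illustrative sketch, not part of the original source): the handlers
// above are attached by Client.AddDebugHandlers once a debug LogLevel is set;
// request and response bodies are only dumped at LogDebugWithHTTPBody.
//
//    sess := session.Must(session.NewSession(&aws.Config{
//        LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
//        Logger:   aws.NewDefaultLogger(),
//    }))
//
//    // Clients created from sess log the signed HTTP request and the raw
//    // HTTP response, including their bodies.
//    _ = sess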

@ -1,13 +0,0 @@
package metadata
// ClientInfo wraps immutable data from the client.Client structure.
type ClientInfo struct {
ServiceName string
ServiceID string
APIVersion string
Endpoint string
SigningName string
SigningRegion string
JSONVersion string
TargetPrefix string
}

@ -1,28 +0,0 @@
package client
import (
"time"
"github.com/aws/aws-sdk-go/aws/request"
)
// NoOpRetryer provides a retryer that performs no retries.
// It should be used when we do not want retries to be performed.
type NoOpRetryer struct{}
// MaxRetries returns the maximum number of retries the service will use to make
// an individual API request; for NoOpRetryer, MaxRetries is always zero.
func (d NoOpRetryer) MaxRetries() int {
return 0
}
// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
return false
}
// RetryRules returns the delay duration before retrying this request again;
// since NoOpRetryer does not retry, RetryRules always returns 0.
func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
return 0
}
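// Example (illustrative sketch, not part of the original source): disabling
// retries entirely by supplying NoOpRetryer through aws.Config.
//
//    sess := session.Must(session.NewSession(&aws.Config{
//        Retryer: NoOpRetryer{},
//    }))
//
//    // Requests from clients built on sess fail after a single attempt.
//    _ = sess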

@ -1,536 +0,0 @@
package aws
import (
"net/http"
"time"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
)
// UseServiceDefaultRetries instructs the config to use the service's own
// default number of retries. This will be the default action if
// Config.MaxRetries is nil also.
const UseServiceDefaultRetries = -1
// RequestRetryer is an alias for a type that implements the request.Retryer
// interface.
type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig structure.
//
// // Create Session with MaxRetries configuration to be shared by multiple
// // service clients.
// sess := session.Must(session.NewSession(&aws.Config{
// MaxRetries: aws.Int(3),
// }))
//
// // Create S3 service client with a specific Region.
// svc := s3.New(sess, &aws.Config{
// Region: aws.String("us-west-2"),
// })
type Config struct {
// Enables verbose error printing of all credential chain errors.
// Should be used when wanting to see all errors while attempting to
// retrieve credentials.
CredentialsChainVerboseErrors *bool
// The credentials object to use when signing requests. Defaults to a
// chain of credential providers to search for credentials in environment
// variables, shared credential file, and EC2 Instance Roles.
Credentials *credentials.Credentials
// An optional endpoint URL (hostname only or fully qualified URI)
// that overrides the default generated endpoint for a client. Set this
// to `""` to use the default generated endpoint.
//
// Note: You must still provide a `Region` value when specifying an
// endpoint for a client.
Endpoint *string
// The resolver to use for looking up endpoints for AWS service clients
// to use based on region.
EndpointResolver endpoints.Resolver
// EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
// ShouldRetry regardless of whether or not request.Retryable is set.
// This will utilize the ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
// is not set, then ShouldRetry will only be called if request.Retryable is nil.
// Proper handling of the request.Retryable field is important when setting this field.
EnforceShouldRetryCheck *bool
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
// Regions and Endpoints.
Region *string
// Set this to `true` to disable SSL when sending requests. Defaults
// to `false`.
DisableSSL *bool
// The HTTP client to use when sending requests. Defaults to
// `http.DefaultClient`.
HTTPClient *http.Client
// An integer value representing the logging level. The default log level
// is zero (LogOff), which represents no logging. To enable logging set
// to a LogLevel Value.
LogLevel *LogLevelType
// The logger writer interface to write logging messages to. Defaults to
// standard out.
Logger Logger
// The maximum number of times that a request will be retried for failures.
// Defaults to -1, which defers the max retry setting to the service
// specific configuration.
MaxRetries *int
// Retryer guides how HTTP requests should be retried in case of
// recoverable failures.
//
// When nil or the value does not implement the request.Retryer interface,
// the client.DefaultRetryer will be used.
//
// When both Retryer and MaxRetries are non-nil, the former is used and
// the latter ignored.
//
// To set the Retryer field in a type-safe manner and with chaining, use
// the request.WithRetryer helper function:
//
// cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
//
Retryer RequestRetryer
// Disables semantic parameter validation, which validates input for
// missing required fields and/or other semantic request input errors.
DisableParamValidation *bool
// Disables the computation of request and response checksums, e.g.,
// CRC32 checksums in Amazon DynamoDB.
DisableComputeChecksums *bool
// Set this to `true` to force the request to use path-style addressing,
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
// will use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
//
// Note: This configuration option is specific to the Amazon S3 service.
//
// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// for Amazon S3: Virtual Hosting of Buckets
S3ForcePathStyle *bool
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
// header to PUT requests over 2MB of content. 100-Continue instructs the
// HTTP client not to send the body until the service responds with a
// `continue` status. This is useful to prevent sending the request body
// until after the request is authenticated, and validated.
//
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
//
// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
// `ExpectContinueTimeout` for information on adjusting the continue wait
// timeout. https://golang.org/pkg/net/http/#Transport
//
// You should use this flag to disable 100-Continue if you experience issues
// with proxies or third party S3 compatible services.
S3Disable100Continue *bool
// Set this to `true` to enable the S3 Accelerate feature. All operations
// compatible with S3 Accelerate will use the accelerate endpoint for
// requests. Requests that are not compatible will fall back to normal S3
// requests.
//
// The bucket must have accelerate enabled before it can be used with an S3
// client that has accelerate enabled. If the bucket is not enabled for
// accelerate an error will be returned. The bucket name must also be DNS
// compatible to work with accelerate.
S3UseAccelerate *bool
// S3DisableContentMD5Validation config option is temporarily disabled
// for S3 GetObject API calls (#1837).
//
// Set this to `true` to disable the S3 service client from automatically
// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
// will also disable the SDK from performing object ContentMD5 validation
// on GetObject API calls.
S3DisableContentMD5Validation *bool
// Set this to `true` to disable the EC2Metadata client from overriding the
// default http.Client's Timeout. This is helpful if you do not want the
// EC2Metadata client to create a new http.Client. This option is only
// meaningful if you're not already using a custom HTTP client with the
// SDK. Enabled by default.
//
// Must be set and provided to the session.NewSession() in order to disable
// the EC2Metadata overriding the timeout for default credentials chain.
//
// Example:
// sess := session.Must(session.NewSession(aws.NewConfig()
// .WithEC2MetadataDisableTimeoutOverride(true)))
//
// svc := s3.New(sess)
//
EC2MetadataDisableTimeoutOverride *bool
// Instructs the endpoint to be generated for a service client to
// be the dual stack endpoint. The dual stack endpoint will support
// both IPv4 and IPv6 addressing.
//
// Setting this for a service which does not support dual stack will fail
// to make requests. It is not recommended to set this value on the session,
// as it will apply to all service clients created with the session, even
// services which don't support dual stack endpoints.
//
// If the Endpoint config value is also provided the UseDualStack flag
// will be ignored.
//
// Example:
//
// sess := session.Must(session.NewSession())
//
// svc := s3.New(sess, &aws.Config{
// UseDualStack: aws.Bool(true),
// })
UseDualStack *bool
// SleepDelay is an override for the func the SDK will call when sleeping
// during the lifecycle of a request. Specifically this will be used for
// request delays. This value should only be used for testing. To adjust
// the delay of a request see the aws/client.DefaultRetryer and
// aws/request.Retryer.
//
// SleepDelay will prevent any Context from being used for canceling retry
// delay of an API operation. It is recommended to not use SleepDelay at all
// and specify a Retryer instead.
SleepDelay func(time.Duration)
// DisableRestProtocolURICleaning will prevent the URL path from being cleaned
// when making rest protocol requests. Defaults to false. This is only needed
// for empty directory names in S3 requests.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
// DisableRestProtocolURICleaning: aws.Bool(true),
// }))
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("//foo//bar//moo"),
// })
DisableRestProtocolURICleaning *bool
// EnableEndpointDiscovery will allow for endpoint discovery on operations that
// have the definition in its model. By default, endpoint discovery is off.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
// EnableEndpointDiscovery: aws.Bool(true),
// }))
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("/foo/bar/moo"),
// })
EnableEndpointDiscovery *bool
// DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
// request endpoint hosts with modeled information.
//
// Disabling this feature is useful when you want to use local endpoints
// for testing that do not support the modeled host prefix pattern.
DisableEndpointHostPrefix *bool
}
// NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers.
//
// // Create Session with MaxRetries configuration to be shared by multiple
// // service clients.
// sess := session.Must(session.NewSession(aws.NewConfig().
// WithMaxRetries(3),
// ))
//
// // Create S3 service client with a specific Region.
// svc := s3.New(sess, aws.NewConfig().
// WithRegion("us-west-2"),
// )
func NewConfig() *Config {
return &Config{}
}
// WithCredentialsChainVerboseErrors sets a config CredentialsChainVerboseErrors value,
// returning a Config pointer for chaining.
func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
c.CredentialsChainVerboseErrors = &verboseErrs
return c
}
// WithCredentials sets a config Credentials value returning a Config pointer
// for chaining.
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
c.Credentials = creds
return c
}
// WithEndpoint sets a config Endpoint value returning a Config pointer for
// chaining.
func (c *Config) WithEndpoint(endpoint string) *Config {
c.Endpoint = &endpoint
return c
}
// WithEndpointResolver sets a config EndpointResolver value returning a
// Config pointer for chaining.
func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
c.EndpointResolver = resolver
return c
}
// WithRegion sets a config Region value returning a Config pointer for
// chaining.
func (c *Config) WithRegion(region string) *Config {
c.Region = &region
return c
}
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
// for chaining.
func (c *Config) WithDisableSSL(disable bool) *Config {
c.DisableSSL = &disable
return c
}
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
// for chaining.
func (c *Config) WithHTTPClient(client *http.Client) *Config {
c.HTTPClient = client
return c
}
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
// for chaining.
func (c *Config) WithMaxRetries(max int) *Config {
c.MaxRetries = &max
return c
}
// WithDisableParamValidation sets a config DisableParamValidation value
// returning a Config pointer for chaining.
func (c *Config) WithDisableParamValidation(disable bool) *Config {
c.DisableParamValidation = &disable
return c
}
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
// returning a Config pointer for chaining.
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
c.DisableComputeChecksums = &disable
return c
}
// WithLogLevel sets a config LogLevel value returning a Config pointer for
// chaining.
func (c *Config) WithLogLevel(level LogLevelType) *Config {
c.LogLevel = &level
return c
}
// WithLogger sets a config Logger value returning a Config pointer for
// chaining.
func (c *Config) WithLogger(logger Logger) *Config {
c.Logger = logger
return c
}
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
// pointer for chaining.
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
c.S3ForcePathStyle = &force
return c
}
// WithS3Disable100Continue sets a config S3Disable100Continue value returning
// a Config pointer for chaining.
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
c.S3Disable100Continue = &disable
return c
}
// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
// pointer for chaining.
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
c.S3UseAccelerate = &enable
return c
}
// WithS3DisableContentMD5Validation sets a config
// S3DisableContentMD5Validation value returning a Config pointer for chaining.
func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
c.S3DisableContentMD5Validation = &enable
return c
}
// WithUseDualStack sets a config UseDualStack value returning a Config
// pointer for chaining.
func (c *Config) WithUseDualStack(enable bool) *Config {
c.UseDualStack = &enable
return c
}
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
// returning a Config pointer for chaining.
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
c.EC2MetadataDisableTimeoutOverride = &enable
return c
}
// WithSleepDelay overrides the function used to sleep while waiting for the
// next retry. Defaults to time.Sleep.
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
c.SleepDelay = fn
return c
}
// WithEndpointDiscovery will set whether or not to use endpoint discovery.
func (c *Config) WithEndpointDiscovery(t bool) *Config {
c.EnableEndpointDiscovery = &t
return c
}
// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
// when making requests.
func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
c.DisableEndpointHostPrefix = &t
return c
}
// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
for _, other := range cfgs {
mergeInConfig(c, other)
}
}
func mergeInConfig(dst *Config, other *Config) {
if other == nil {
return
}
if other.CredentialsChainVerboseErrors != nil {
dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
}
if other.Credentials != nil {
dst.Credentials = other.Credentials
}
if other.Endpoint != nil {
dst.Endpoint = other.Endpoint
}
if other.EndpointResolver != nil {
dst.EndpointResolver = other.EndpointResolver
}
if other.Region != nil {
dst.Region = other.Region
}
if other.DisableSSL != nil {
dst.DisableSSL = other.DisableSSL
}
if other.HTTPClient != nil {
dst.HTTPClient = other.HTTPClient
}
if other.LogLevel != nil {
dst.LogLevel = other.LogLevel
}
if other.Logger != nil {
dst.Logger = other.Logger
}
if other.MaxRetries != nil {
dst.MaxRetries = other.MaxRetries
}
if other.Retryer != nil {
dst.Retryer = other.Retryer
}
if other.DisableParamValidation != nil {
dst.DisableParamValidation = other.DisableParamValidation
}
if other.DisableComputeChecksums != nil {
dst.DisableComputeChecksums = other.DisableComputeChecksums
}
if other.S3ForcePathStyle != nil {
dst.S3ForcePathStyle = other.S3ForcePathStyle
}
if other.S3Disable100Continue != nil {
dst.S3Disable100Continue = other.S3Disable100Continue
}
if other.S3UseAccelerate != nil {
dst.S3UseAccelerate = other.S3UseAccelerate
}
if other.S3DisableContentMD5Validation != nil {
dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
}
if other.UseDualStack != nil {
dst.UseDualStack = other.UseDualStack
}
if other.EC2MetadataDisableTimeoutOverride != nil {
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
}
if other.SleepDelay != nil {
dst.SleepDelay = other.SleepDelay
}
if other.DisableRestProtocolURICleaning != nil {
dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
}
if other.EnforceShouldRetryCheck != nil {
dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
}
if other.EnableEndpointDiscovery != nil {
dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
}
if other.DisableEndpointHostPrefix != nil {
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
}
}
// Copy will return a shallow copy of the Config object. If any additional
// configurations are provided they will be merged into the new config returned.
func (c *Config) Copy(cfgs ...*Config) *Config {
dst := &Config{}
dst.MergeIn(c)
for _, cfg := range cfgs {
dst.MergeIn(cfg)
}
return dst
}
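// Example (illustrative sketch, not part of the original source): layering
// configuration with Copy and MergeIn. Only non-nil fields are merged, and
// later values win, so a shared base Config can be overridden per client.
// The localhost endpoint below is a hypothetical value.
//
//    base := NewConfig().
//        WithRegion("us-west-2").
//        WithMaxRetries(3)
//
//    // Derive a per-client config that only overrides the endpoint.
//    local := base.Copy(NewConfig().WithEndpoint("http://localhost:8080"))
//
//    // base is unchanged; local has Region, MaxRetries, and Endpoint set.
//    _, _ = base, local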

@ -1,37 +0,0 @@
// +build !go1.9
package aws
import "time"
// Context is a copy of the Go v1.7 stdlib's context.Context interface.
// It is represented as an SDK interface to enable you to use the "WithContext"
// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
Value(key interface{}) interface{}
}

@ -1,11 +0,0 @@
// +build go1.9
package aws
import "context"
// Context is an alias of the Go stdlib's context.Context interface.
// It can be used within the SDK's API operation "WithContext" methods.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context = context.Context

@ -1,56 +0,0 @@
// +build !go1.7
package aws
import "time"
// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
// provide a 1.6 and 1.5 safe version of context that is compatible with Go
// 1.7's Context.
//
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case backgroundCtx:
return "aws.BackgroundContext"
}
return "unknown empty Context"
}
var (
backgroundCtx = new(emptyCtx)
)
// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
return backgroundCtx
}

@ -1,20 +0,0 @@
// +build go1.7
package aws
import "context"
// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
return context.Background()
}

@ -1,24 +0,0 @@
package aws
import (
"time"
)
// SleepWithContext will wait for the timer duration to expire, or for the
// context to be canceled, whichever happens first. If the context is canceled,
// the Context's error will be returned.
//
// Expects Context to always return a non-nil error if the Done channel is closed.
func SleepWithContext(ctx Context, dur time.Duration) error {
t := time.NewTimer(dur)
defer t.Stop()
select {
case <-t.C:
break
case <-ctx.Done():
return ctx.Err()
}
return nil
}
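// Example (illustrative sketch, not part of the original source): using
// SleepWithContext for a cancelable backoff. The context package usage assumes
// Go 1.7+, where context.Context satisfies (or aliases) aws.Context.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
//    defer cancel()
//
//    if err := SleepWithContext(ctx, time.Second); err != nil {
//        // The context deadline fired before the full second elapsed;
//        // err is ctx.Err(), i.e. context.DeadlineExceeded here.
//    }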

@ -1,918 +0,0 @@
package aws
import "time"
// String returns a pointer to the string value passed in.
func String(v string) *string {
return &v
}
// StringValue returns the value of the string pointer passed in or
// "" if the pointer is nil.
func StringValue(v *string) string {
if v != nil {
return *v
}
return ""
}
// StringSlice converts a slice of string values into a slice of
// string pointers
func StringSlice(src []string) []*string {
dst := make([]*string, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// StringValueSlice converts a slice of string pointers into a slice of
// string values
func StringValueSlice(src []*string) []string {
dst := make([]string, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// StringMap converts a string map of string values into a string
// map of string pointers
func StringMap(src map[string]string) map[string]*string {
dst := make(map[string]*string)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// StringValueMap converts a string map of string pointers into a string
// map of string values
func StringValueMap(src map[string]*string) map[string]string {
dst := make(map[string]string)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
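// Example (illustrative sketch, not part of the original source): the pointer
// helpers in this file are typically used to fill and read API structs whose
// fields are pointers. The anonymous struct below is hypothetical.
//
//    in := struct {
//        Bucket *string
//        Keys   []*string
//    }{
//        Bucket: String("my-bucket"),
//        Keys:   StringSlice([]string{"a", "b"}),
//    }
//
//    name := StringValue(in.Bucket)     // "my-bucket"; "" if the pointer were nil
//    keys := StringValueSlice(in.Keys)  // []string{"a", "b"}
//    _, _ = name, keys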
// Bool returns a pointer to the bool value passed in.
func Bool(v bool) *bool {
return &v
}
// BoolValue returns the value of the bool pointer passed in or
// false if the pointer is nil.
func BoolValue(v *bool) bool {
if v != nil {
return *v
}
return false
}
// BoolSlice converts a slice of bool values into a slice of
// bool pointers
func BoolSlice(src []bool) []*bool {
dst := make([]*bool, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// BoolValueSlice converts a slice of bool pointers into a slice of
// bool values
func BoolValueSlice(src []*bool) []bool {
dst := make([]bool, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// BoolMap converts a string map of bool values into a string
// map of bool pointers
func BoolMap(src map[string]bool) map[string]*bool {
dst := make(map[string]*bool)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// BoolValueMap converts a string map of bool pointers into a string
// map of bool values
func BoolValueMap(src map[string]*bool) map[string]bool {
dst := make(map[string]bool)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int returns a pointer to the int value passed in.
func Int(v int) *int {
return &v
}
// IntValue returns the value of the int pointer passed in or
// 0 if the pointer is nil.
func IntValue(v *int) int {
if v != nil {
return *v
}
return 0
}
// IntSlice converts a slice of int values into a slice of
// int pointers
func IntSlice(src []int) []*int {
dst := make([]*int, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// IntValueSlice converts a slice of int pointers into a slice of
// int values
func IntValueSlice(src []*int) []int {
dst := make([]int, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// IntMap converts a string map of int values into a string
// map of int pointers
func IntMap(src map[string]int) map[string]*int {
dst := make(map[string]*int)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// IntValueMap converts a string map of int pointers into a string
// map of int values
func IntValueMap(src map[string]*int) map[string]int {
dst := make(map[string]int)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint returns a pointer to the uint value passed in.
func Uint(v uint) *uint {
return &v
}
// UintValue returns the value of the uint pointer passed in or
// 0 if the pointer is nil.
func UintValue(v *uint) uint {
if v != nil {
return *v
}
return 0
}
// UintSlice converts a slice of uint values into a slice of
// uint pointers
func UintSlice(src []uint) []*uint {
dst := make([]*uint, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// UintValueSlice converts a slice of uint pointers into a slice of
// uint values
func UintValueSlice(src []*uint) []uint {
dst := make([]uint, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// UintMap converts a string map of uint values into a string
// map of uint pointers
func UintMap(src map[string]uint) map[string]*uint {
dst := make(map[string]*uint)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// UintValueMap converts a string map of uint pointers into a string
// map of uint values
func UintValueMap(src map[string]*uint) map[string]uint {
dst := make(map[string]uint)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int8 returns a pointer to the int8 value passed in.
func Int8(v int8) *int8 {
return &v
}
// Int8Value returns the value of the int8 pointer passed in or
// 0 if the pointer is nil.
func Int8Value(v *int8) int8 {
if v != nil {
return *v
}
return 0
}
// Int8Slice converts a slice of int8 values into a slice of
// int8 pointers
func Int8Slice(src []int8) []*int8 {
dst := make([]*int8, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int8ValueSlice converts a slice of int8 pointers into a slice of
// int8 values
func Int8ValueSlice(src []*int8) []int8 {
dst := make([]int8, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int8Map converts a string map of int8 values into a string
// map of int8 pointers
func Int8Map(src map[string]int8) map[string]*int8 {
dst := make(map[string]*int8)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int8ValueMap converts a string map of int8 pointers into a string
// map of int8 values
func Int8ValueMap(src map[string]*int8) map[string]int8 {
dst := make(map[string]int8)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int16 returns a pointer to the int16 value passed in.
func Int16(v int16) *int16 {
return &v
}
// Int16Value returns the value of the int16 pointer passed in or
// 0 if the pointer is nil.
func Int16Value(v *int16) int16 {
if v != nil {
return *v
}
return 0
}
// Int16Slice converts a slice of int16 values into a slice of
// int16 pointers
func Int16Slice(src []int16) []*int16 {
dst := make([]*int16, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int16ValueSlice converts a slice of int16 pointers into a slice of
// int16 values
func Int16ValueSlice(src []*int16) []int16 {
dst := make([]int16, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int16Map converts a string map of int16 values into a string
// map of int16 pointers
func Int16Map(src map[string]int16) map[string]*int16 {
dst := make(map[string]*int16)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int16ValueMap converts a string map of int16 pointers into a string
// map of int16 values
func Int16ValueMap(src map[string]*int16) map[string]int16 {
dst := make(map[string]int16)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int32 returns a pointer to the int32 value passed in.
func Int32(v int32) *int32 {
return &v
}
// Int32Value returns the value of the int32 pointer passed in or
// 0 if the pointer is nil.
func Int32Value(v *int32) int32 {
if v != nil {
return *v
}
return 0
}
// Int32Slice converts a slice of int32 values into a slice of
// int32 pointers
func Int32Slice(src []int32) []*int32 {
dst := make([]*int32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int32ValueSlice converts a slice of int32 pointers into a slice of
// int32 values
func Int32ValueSlice(src []*int32) []int32 {
dst := make([]int32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int32Map converts a string map of int32 values into a string
// map of int32 pointers
func Int32Map(src map[string]int32) map[string]*int32 {
dst := make(map[string]*int32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int32ValueMap converts a string map of int32 pointers into a string
// map of int32 values
func Int32ValueMap(src map[string]*int32) map[string]int32 {
dst := make(map[string]int32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
return &v
}
// Int64Value returns the value of the int64 pointer passed in or
// 0 if the pointer is nil.
func Int64Value(v *int64) int64 {
if v != nil {
return *v
}
return 0
}
// Int64Slice converts a slice of int64 values into a slice of
// int64 pointers
func Int64Slice(src []int64) []*int64 {
dst := make([]*int64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int64ValueSlice converts a slice of int64 pointers into a slice of
// int64 values
func Int64ValueSlice(src []*int64) []int64 {
dst := make([]int64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int64Map converts a string map of int64 values into a string
// map of int64 pointers
func Int64Map(src map[string]int64) map[string]*int64 {
dst := make(map[string]*int64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int64ValueMap converts a string map of int64 pointers into a string
// map of int64 values
func Int64ValueMap(src map[string]*int64) map[string]int64 {
dst := make(map[string]int64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint8 returns a pointer to the uint8 value passed in.
func Uint8(v uint8) *uint8 {
return &v
}
// Uint8Value returns the value of the uint8 pointer passed in or
// 0 if the pointer is nil.
func Uint8Value(v *uint8) uint8 {
if v != nil {
return *v
}
return 0
}
// Uint8Slice converts a slice of uint8 values into a slice of
// uint8 pointers
func Uint8Slice(src []uint8) []*uint8 {
dst := make([]*uint8, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint8ValueSlice converts a slice of uint8 pointers into a slice of
// uint8 values
func Uint8ValueSlice(src []*uint8) []uint8 {
dst := make([]uint8, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint8Map converts a string map of uint8 values into a string
// map of uint8 pointers
func Uint8Map(src map[string]uint8) map[string]*uint8 {
dst := make(map[string]*uint8)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint8ValueMap converts a string map of uint8 pointers into a string
// map of uint8 values
func Uint8ValueMap(src map[string]*uint8) map[string]uint8 {
dst := make(map[string]uint8)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint16 returns a pointer to the uint16 value passed in.
func Uint16(v uint16) *uint16 {
return &v
}
// Uint16Value returns the value of the uint16 pointer passed in or
// 0 if the pointer is nil.
func Uint16Value(v *uint16) uint16 {
if v != nil {
return *v
}
return 0
}
// Uint16Slice converts a slice of uint16 values into a slice of
// uint16 pointers
func Uint16Slice(src []uint16) []*uint16 {
dst := make([]*uint16, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
// uint16 values
func Uint16ValueSlice(src []*uint16) []uint16 {
dst := make([]uint16, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint16Map converts a string map of uint16 values into a string
// map of uint16 pointers
func Uint16Map(src map[string]uint16) map[string]*uint16 {
dst := make(map[string]*uint16)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint16ValueMap converts a string map of uint16 pointers into a string
// map of uint16 values
func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
dst := make(map[string]uint16)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint32 returns a pointer to the uint32 value passed in.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint32Value returns the value of the uint32 pointer passed in or
// 0 if the pointer is nil.
func Uint32Value(v *uint32) uint32 {
if v != nil {
return *v
}
return 0
}
// Uint32Slice converts a slice of uint32 values into a slice of
// uint32 pointers
func Uint32Slice(src []uint32) []*uint32 {
dst := make([]*uint32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
// uint32 values
func Uint32ValueSlice(src []*uint32) []uint32 {
dst := make([]uint32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint32Map converts a string map of uint32 values into a string
// map of uint32 pointers
func Uint32Map(src map[string]uint32) map[string]*uint32 {
dst := make(map[string]*uint32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint32ValueMap converts a string map of uint32 pointers into a string
// map of uint32 values
func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
dst := make(map[string]uint32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint64 returns a pointer to the uint64 value passed in.
func Uint64(v uint64) *uint64 {
return &v
}
// Uint64Value returns the value of the uint64 pointer passed in or
// 0 if the pointer is nil.
func Uint64Value(v *uint64) uint64 {
if v != nil {
return *v
}
return 0
}
// Uint64Slice converts a slice of uint64 values into a slice of
// uint64 pointers
func Uint64Slice(src []uint64) []*uint64 {
dst := make([]*uint64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
// uint64 values
func Uint64ValueSlice(src []*uint64) []uint64 {
dst := make([]uint64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint64Map converts a string map of uint64 values into a string
// map of uint64 pointers
func Uint64Map(src map[string]uint64) map[string]*uint64 {
dst := make(map[string]*uint64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint64ValueMap converts a string map of uint64 pointers into a string
// map of uint64 values
func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
dst := make(map[string]uint64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float32 returns a pointer to the float32 value passed in.
func Float32(v float32) *float32 {
return &v
}
// Float32Value returns the value of the float32 pointer passed in or
// 0 if the pointer is nil.
func Float32Value(v *float32) float32 {
if v != nil {
return *v
}
return 0
}
// Float32Slice converts a slice of float32 values into a slice of
// float32 pointers
func Float32Slice(src []float32) []*float32 {
dst := make([]*float32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Float32ValueSlice converts a slice of float32 pointers into a slice of
// float32 values
func Float32ValueSlice(src []*float32) []float32 {
dst := make([]float32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Float32Map converts a string map of float32 values into a string
// map of float32 pointers
func Float32Map(src map[string]float32) map[string]*float32 {
dst := make(map[string]*float32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Float32ValueMap converts a string map of float32 pointers into a string
// map of float32 values
func Float32ValueMap(src map[string]*float32) map[string]float32 {
dst := make(map[string]float32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
return &v
}
// Float64Value returns the value of the float64 pointer passed in or
// 0 if the pointer is nil.
func Float64Value(v *float64) float64 {
if v != nil {
return *v
}
return 0
}
// Float64Slice converts a slice of float64 values into a slice of
// float64 pointers
func Float64Slice(src []float64) []*float64 {
dst := make([]*float64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Float64ValueSlice converts a slice of float64 pointers into a slice of
// float64 values
func Float64ValueSlice(src []*float64) []float64 {
dst := make([]float64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Float64Map converts a string map of float64 values into a string
// map of float64 pointers
func Float64Map(src map[string]float64) map[string]*float64 {
dst := make(map[string]*float64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Float64ValueMap converts a string map of float64 pointers into a string
// map of float64 values
func Float64ValueMap(src map[string]*float64) map[string]float64 {
dst := make(map[string]float64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Time returns a pointer to the time.Time value passed in.
func Time(v time.Time) *time.Time {
return &v
}
// TimeValue returns the value of the time.Time pointer passed in or
// time.Time{} if the pointer is nil.
func TimeValue(v *time.Time) time.Time {
if v != nil {
return *v
}
return time.Time{}
}
// SecondsTimeValue converts an int64 pointer to a time.Time value
// representing seconds since Epoch, or time.Time{} if the pointer is nil.
// Note that the value is divided by 1000 before conversion.
func SecondsTimeValue(v *int64) time.Time {
if v != nil {
return time.Unix((*v / 1000), 0)
}
return time.Time{}
}
// MillisecondsTimeValue converts an int64 pointer to a time.Time value
// representing milliseconds since Epoch, or time.Time{} if the pointer is nil.
func MillisecondsTimeValue(v *int64) time.Time {
if v != nil {
return time.Unix(0, (*v * 1000000))
}
return time.Time{}
}
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
// The result is undefined if the Unix time cannot be represented by an int64;
// this includes calling TimeUnixMilli on a zero Time.
//
// This utility is useful for service APIs such as CloudWatch Logs which require
// their Unix time values to be in milliseconds.
//
// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
func TimeUnixMilli(t time.Time) int64 {
return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
}
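// Illustrative sketch (not part of the original source): converting a
// time.Time into the millisecond timestamp format expected by APIs such as
// CloudWatch Logs, assuming the package is imported as "aws".
//
//    ts := time.Date(2019, time.January, 1, 0, 0, 0, 0, time.UTC)
//    ms := aws.TimeUnixMilli(ts)
//    // ms == 1546300800000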
// TimeSlice converts a slice of time.Time values into a slice of
// time.Time pointers
func TimeSlice(src []time.Time) []*time.Time {
dst := make([]*time.Time, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// TimeValueSlice converts a slice of time.Time pointers into a slice of
// time.Time values
func TimeValueSlice(src []*time.Time) []time.Time {
dst := make([]time.Time, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// TimeMap converts a string map of time.Time values into a string
// map of time.Time pointers
func TimeMap(src map[string]time.Time) map[string]*time.Time {
dst := make(map[string]*time.Time)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// TimeValueMap converts a string map of time.Time pointers into a string
// map of time.Time values
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
dst := make(map[string]time.Time)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
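// Illustrative sketch (not part of the original source): round-tripping a map
// of float64 values through the pointer helpers above. The map contents are
// hypothetical.
//
//    prices := map[string]float64{"small": 1.5, "large": 3.0}
//    ptrs := aws.Float64Map(prices)    // map[string]*float64
//    back := aws.Float64ValueMap(ptrs) // map[string]float64, nil entries become 0
//    fmt.Println(aws.Float64Value(ptrs["small"]), back["large"]) // 1.5 3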

@ -1,230 +0,0 @@
package corehandlers
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
)
// Interface for matching types which also have a Len method.
type lener interface {
Len() int
}
// BuildContentLengthHandler builds the content length of a request based on the body,
// or will use the HTTPRequest.Header's "Content-Length" if defined. If it is unable
// to determine the request body's length and no "Content-Length" was specified, an
// error is set on the request.
//
// The Content-Length will only be added to the request if the length of the body
// is greater than 0. If the body is empty or the current `Content-Length`
// header is <= 0, the header will also be stripped.
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
var length int64
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
length, _ = strconv.ParseInt(slength, 10, 64)
} else {
if r.Body != nil {
var err error
length, err = aws.SeekerLen(r.Body)
if err != nil {
r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
return
}
}
}
if length > 0 {
r.HTTPRequest.ContentLength = length
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
} else {
r.HTTPRequest.ContentLength = 0
r.HTTPRequest.Header.Del("Content-Length")
}
}}
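// Illustrative sketch (not part of the original source): how the length of a
// seekable body can be determined with aws.SeekerLen, which is what the
// handler above falls back to when no Content-Length header is present.
//
//    body := bytes.NewReader([]byte(`{"hello":"world"}`))
//    if n, err := aws.SeekerLen(body); err == nil {
//        fmt.Println(n) // 17
//    }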
var reStatusCode = regexp.MustCompile(`^(\d{3})`)
// ValidateReqSigHandler is a request handler to ensure that the request's
// signature doesn't expire before it is sent. This can happen when a request
// is built and signed significantly before it is sent, or when significant
// delays occur while retrying requests, causing the signature to expire.
var ValidateReqSigHandler = request.NamedHandler{
Name: "core.ValidateReqSigHandler",
Fn: func(r *request.Request) {
// Requests with anonymous credentials are not signed
if r.Config.Credentials == credentials.AnonymousCredentials {
return
}
signedTime := r.Time
if !r.LastSignedAt.IsZero() {
signedTime = r.LastSignedAt
}
// 5 minutes to allow for some clock skew/delays in transmission.
// Would be improved with aws/aws-sdk-go#423
if signedTime.Add(5 * time.Minute).After(time.Now()) {
return
}
fmt.Println("request expired, resigning")
r.Sign()
},
}
// SendHandler is a request handler to send the service request using the HTTP client.
var SendHandler = request.NamedHandler{
Name: "core.SendHandler",
Fn: func(r *request.Request) {
sender := sendFollowRedirects
if r.DisableFollowRedirects {
sender = sendWithoutFollowRedirects
}
if request.NoBody == r.HTTPRequest.Body {
// Strip off the request body if the NoBody reader was used as a
// place holder for a request body. This prevents the SDK from
// making requests with a request body when it would be invalid
// to do so.
//
// Use a shallow copy of the http.Request to ensure the race condition
// of transport on Body will not trigger
reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
reqCopy.Body = nil
r.HTTPRequest = &reqCopy
defer func() {
r.HTTPRequest = reqOrig
}()
}
var err error
r.HTTPResponse, err = sender(r)
if err != nil {
handleSendError(r, err)
}
},
}
func sendFollowRedirects(r *request.Request) (*http.Response, error) {
return r.Config.HTTPClient.Do(r.HTTPRequest)
}
func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
transport := r.Config.HTTPClient.Transport
if transport == nil {
transport = http.DefaultTransport
}
return transport.RoundTrip(r.HTTPRequest)
}
func handleSendError(r *request.Request, err error) {
// Prevent leaking if an HTTPResponse was returned. Clean up
// the body.
if r.HTTPResponse != nil {
r.HTTPResponse.Body.Close()
}
// Capture the case where a url.Error is returned while processing the
// response, e.g. a 301 without a Location header comes back as a string
// error and r.HTTPResponse is nil. Other URL redirect errors come back
// in a similar manner.
if e, ok := err.(*url.Error); ok && e.Err != nil {
if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
code, _ := strconv.ParseInt(s[1], 10, 64)
r.HTTPResponse = &http.Response{
StatusCode: int(code),
Status: http.StatusText(int(code)),
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
return
}
}
if r.HTTPResponse == nil {
// Add a dummy request response object to ensure the HTTPResponse
// value is consistent.
r.HTTPResponse = &http.Response{
StatusCode: int(0),
Status: http.StatusText(int(0)),
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
}
// Catch all request errors, and let the default retrier determine
// if the error is retryable.
r.Error = awserr.New("RequestError", "send request failed", err)
// Override the error with a context canceled error if the request's context was canceled.
ctx := r.Context()
select {
case <-ctx.Done():
r.Error = awserr.New(request.CanceledErrorCode,
"request context canceled", ctx.Err())
r.Retryable = aws.Bool(false)
default:
}
}
// ValidateResponseHandler is a request handler to validate service response.
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
// this may be replaced by an UnmarshalError handler
r.Error = awserr.New("UnknownError", "unknown error", nil)
}
}}
// AfterRetryHandler performs final checks to determine if the request should
// be retried and how long to delay.
var AfterRetryHandler = request.NamedHandler{
Name: "core.AfterRetryHandler",
Fn: func(r *request.Request) {
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
r.Retryable = aws.Bool(r.ShouldRetry(r))
}
if r.WillRetry() {
r.RetryDelay = r.RetryRules(r)
if sleepFn := r.Config.SleepDelay; sleepFn != nil {
// Support SleepDelay for backwards compatibility and testing
sleepFn(r.RetryDelay)
} else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
r.Error = awserr.New(request.CanceledErrorCode,
"request context canceled", err)
r.Retryable = aws.Bool(false)
return
}
// when the expired token exception occurs the credentials
// need to be expired locally so that the next request to
// get credentials will trigger a credentials refresh.
if r.IsErrorExpired() {
r.Config.Credentials.Expire()
}
r.RetryCount++
r.Error = nil
}
}}
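// Illustrative sketch (not part of the original source): the retry behaviour
// above can be tuned through aws.Config. SleepDelay is mainly useful for
// tests; in normal use the context-aware sleep is preferred.
//
//    cfg := aws.NewConfig().
//        WithMaxRetries(3).
//        WithSleepDelay(func(d time.Duration) { time.Sleep(d) })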
// ValidateEndpointHandler is a request handler to validate a request had the
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
// region is not valid.
var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
r.Error = aws.ErrMissingRegion
} else if r.ClientInfo.Endpoint == "" {
r.Error = aws.ErrMissingEndpoint
}
}}

@ -1,17 +0,0 @@
package corehandlers
import "github.com/aws/aws-sdk-go/aws/request"
// ValidateParametersHandler is a request handler to validate the input parameters.
// Validating parameters only has meaning if done prior to the request being sent.
var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
if !r.ParamsFilled() {
return
}
if v, ok := r.Params.(request.Validator); ok {
if err := v.Validate(); err != nil {
r.Error = err
}
}
}}

@ -1,37 +0,0 @@
package corehandlers
import (
"os"
"runtime"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
// to the user agent.
var SDKVersionUserAgentHandler = request.NamedHandler{
Name: "core.SDKVersionUserAgentHandler",
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
runtime.Version(), runtime.GOOS, runtime.GOARCH),
}
const execEnvVar = `AWS_EXECUTION_ENV`
const execEnvUAKey = `exec-env`
// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
// execution environment to the user agent.
//
// If the environment variable AWS_EXECUTION_ENV is set, its value will be
// appended to the user agent string.
var AddHostExecEnvUserAgentHander = request.NamedHandler{
Name: "core.AddHostExecEnvUserAgentHander",
Fn: func(r *request.Request) {
v := os.Getenv(execEnvVar)
if len(v) == 0 {
return
}
request.AddToUserAgent(r, execEnvUAKey+"/"+v)
},
}

@ -1,100 +0,0 @@
package credentials
import (
"github.com/aws/aws-sdk-go/aws/awserr"
)
var (
// ErrNoValidProvidersFoundInChain is returned when there are no valid
// providers in the ChainProvider.
//
// This has been deprecated. For verbose error messaging set
// aws.Config.CredentialsChainVerboseErrors to true.
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
`no valid providers in chain. Deprecated.
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
nil)
)
// A ChainProvider will search for a provider which returns credentials
// and cache that provider until Retrieve is called again.
//
// The ChainProvider provides a way of chaining multiple providers together
// which will pick the first available using priority order of the Providers
// in the list.
//
// If none of the Providers retrieve valid credentials Value, ChainProvider's
// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
//
// If a Provider is found which returns valid credentials Value ChainProvider
// will cache that Provider for all calls to IsExpired(), until Retrieve is
// called again.
//
// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
// In this example EnvProvider will first check if any credentials are available
// via the environment variables. If there are none ChainProvider will check
// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
// does not return any credentials ChainProvider will return the error
// ErrNoValidProvidersFoundInChain
//
// creds := credentials.NewChainCredentials(
// []credentials.Provider{
// &credentials.EnvProvider{},
// &ec2rolecreds.EC2RoleProvider{
// Client: ec2metadata.New(sess),
// },
// })
//
// // Usage of ChainCredentials with aws.Config
// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
// Credentials: creds,
// })))
//
type ChainProvider struct {
Providers []Provider
curr Provider
VerboseErrors bool
}
// NewChainCredentials returns a pointer to a new Credentials object
// wrapping a chain of providers.
func NewChainCredentials(providers []Provider) *Credentials {
return NewCredentials(&ChainProvider{
Providers: append([]Provider{}, providers...),
})
}
// Retrieve returns the credentials value or error if no provider returned
// without error.
//
// If a provider is found it will be cached and any calls to IsExpired()
// will return the expired state of the cached provider.
func (c *ChainProvider) Retrieve() (Value, error) {
var errs []error
for _, p := range c.Providers {
creds, err := p.Retrieve()
if err == nil {
c.curr = p
return creds, nil
}
errs = append(errs, err)
}
c.curr = nil
var err error
err = ErrNoValidProvidersFoundInChain
if c.VerboseErrors {
err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
}
return Value{}, err
}
// IsExpired will return the expired state of the currently cached provider
// if there is one. If there is no current provider, true will be returned.
func (c *ChainProvider) IsExpired() bool {
if c.curr != nil {
return c.curr.IsExpired()
}
return true
}
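// Illustrative sketch (not part of the original source): constructing a
// ChainProvider directly to opt into verbose errors instead of setting
// aws.Config.CredentialsChainVerboseErrors.
//
//    creds := credentials.NewCredentials(&credentials.ChainProvider{
//        VerboseErrors: true,
//        Providers: []credentials.Provider{
//            &credentials.EnvProvider{},
//            &credentials.SharedCredentialsProvider{},
//        },
//    })
//    if _, err := creds.Get(); err != nil {
//        // With VerboseErrors set, err lists every provider failure.
//    }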

@ -1,299 +0,0 @@
// Package credentials provides credential retrieval and management
//
// The Credentials is the primary method of getting access to and managing
// credentials Values. Using dependency injection retrieval of the credential
// values is handled by an object which satisfies the Provider interface.
//
// By default the Credentials.Get() will cache the successful result of a
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
// point Credentials will call Provider's Retrieve() to get new credential Value.
//
// The Provider is responsible for determining when credentials Value have expired.
// It is also important to note that Credentials will always call Retrieve the
// first time Credentials.Get() is called.
//
// Example of using the environment variable credentials.
//
// creds := credentials.NewEnvCredentials()
//
// // Retrieve the credentials value
// credValue, err := creds.Get()
// if err != nil {
// // handle error
// }
//
// Example of forcing credentials to expire and be refreshed on the next Get().
// This may be helpful to proactively expire credentials and refresh them sooner
// than they would naturally expire on their own.
//
// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
// creds.Expire()
// credsValue, err := creds.Get()
// // New credentials will be retrieved instead of from cache.
//
//
// Custom Provider
//
// Each Provider built into this package also provides a helper method to generate
// a Credentials pointer setup with the provider. To use a custom Provider just
// create a type which satisfies the Provider interface and pass it to the
// NewCredentials method.
//
// type MyProvider struct{}
// func (m *MyProvider) Retrieve() (Value, error) {...}
// func (m *MyProvider) IsExpired() bool {...}
//
// creds := credentials.NewCredentials(&MyProvider{})
// credValue, err := creds.Get()
//
package credentials
import (
"fmt"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// AnonymousCredentials is an empty Credential object that can be used as
// dummy placeholder credentials for requests that do not need to be signed.
//
// This Credentials can be used to configure a service to not sign requests
// when making service API calls. For example, when accessing public
// s3 buckets.
//
// svc := s3.New(session.Must(session.NewSession(&aws.Config{
// Credentials: credentials.AnonymousCredentials,
// })))
// // Access public S3 buckets.
var AnonymousCredentials = NewStaticCredentials("", "", "")
// A Value is the AWS credentials value for individual credential fields.
type Value struct {
// AWS Access key ID
AccessKeyID string
// AWS Secret Access Key
SecretAccessKey string
// AWS Session Token
SessionToken string
// Provider used to get credentials
ProviderName string
}
// HasKeys returns if the credentials Value has both AccessKeyID and
// SecretAccessKey value set.
func (v Value) HasKeys() bool {
return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
}
// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own expired state, and what it
// means to be expired.
//
// The Provider should not need to implement its own mutexes, because
// that will be managed by Credentials.
type Provider interface {
// Retrieve returns nil if it successfully retrieved the value.
// An error is returned if the value was not obtainable or is empty.
Retrieve() (Value, error)
// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
IsExpired() bool
}
// An Expirer is an interface that Providers can implement to expose the expiration
// time, if known. If the Provider cannot accurately provide this info,
// it should not implement this interface.
type Expirer interface {
// The time at which the credentials are no longer valid
ExpiresAt() time.Time
}
// An ErrorProvider is a stub credentials provider that always returns an error.
// It is used by the SDK when construction of a known provider is not possible
// due to an error.
type ErrorProvider struct {
// The error to be returned from Retrieve
Err error
// The provider name to set on the returned credentials Value
ProviderName string
}
// Retrieve will always return the error that the ErrorProvider was created with.
func (p ErrorProvider) Retrieve() (Value, error) {
return Value{ProviderName: p.ProviderName}, p.Err
}
// IsExpired will always return not expired.
func (p ErrorProvider) IsExpired() bool {
return false
}
// An Expiry provides shared expiration logic to be used by credentials
// providers to implement expiry functionality.
//
// The best method to use this struct is as an anonymous field within the
// provider's struct.
//
// Example:
// type EC2RoleProvider struct {
// Expiry
// ...
// }
type Expiry struct {
// The date/time when to expire on
expiration time.Time
// If set will be used by IsExpired to determine the current time.
// Defaults to time.Now if CurrentTime is not set. Available for testing
// to be able to mock out the current time.
CurrentTime func() time.Time
}
// SetExpiration sets the expiration IsExpired will check when called.
//
// If window is greater than 0 the expiration time will be reduced by the
// window value.
//
// Using a window is helpful to trigger credentials to expire sooner than
// the expiration time given to ensure no requests are made with expired
// tokens.
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
e.expiration = expiration
if window > 0 {
e.expiration = e.expiration.Add(-window)
}
}
// IsExpired returns if the credentials are expired.
func (e *Expiry) IsExpired() bool {
curTime := e.CurrentTime
if curTime == nil {
curTime = time.Now
}
return e.expiration.Before(curTime())
}
// ExpiresAt returns the expiration time of the credential
func (e *Expiry) ExpiresAt() time.Time {
return e.expiration
}
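// Illustrative sketch (not part of the original source): embedding Expiry in a
// custom provider so IsExpired and ExpiresAt come for free. The token source
// and values are hypothetical.
//
//    type tokenProvider struct {
//        credentials.Expiry
//    }
//
//    func (p *tokenProvider) Retrieve() (credentials.Value, error) {
//        // ... fetch short-lived keys from somewhere ...
//        p.SetExpiration(time.Now().Add(15*time.Minute), time.Minute) // refresh 1m early
//        return credentials.Value{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}, nil
//    }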
// A Credentials provides concurrency safe retrieval of AWS credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
// Credentials is safe to use across multiple goroutines and will manage the
// synchronous state so the Providers do not need to implement their own
// synchronization.
//
// The first Credentials.Get() will always call Provider.Retrieve() to get the
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
creds Value
forceRefresh bool
m sync.RWMutex
provider Provider
}
// NewCredentials returns a pointer to a new Credentials with the provider set.
func NewCredentials(provider Provider) *Credentials {
return &Credentials{
provider: provider,
forceRefresh: true,
}
}
// Get returns the credentials value, or error if the credentials Value failed
// to be retrieved.
//
// Will return the cached credentials Value if it has not expired. If the
// credentials Value has expired the Provider's Retrieve() will be called
// to refresh the credentials.
//
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
func (c *Credentials) Get() (Value, error) {
// Check the cached credentials first with just the read lock.
c.m.RLock()
if !c.isExpired() {
creds := c.creds
c.m.RUnlock()
return creds, nil
}
c.m.RUnlock()
// Credentials are expired; retrieve new credentials while holding the full
// lock.
c.m.Lock()
defer c.m.Unlock()
if c.isExpired() {
creds, err := c.provider.Retrieve()
if err != nil {
return Value{}, err
}
c.creds = creds
c.forceRefresh = false
}
return c.creds, nil
}
// Expire expires the credentials and forces them to be retrieved on the
// next call to Get().
//
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
c.m.Lock()
defer c.m.Unlock()
c.forceRefresh = true
}
// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
//
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
c.m.RLock()
defer c.m.RUnlock()
return c.isExpired()
}
// isExpired helper method wrapping the definition of expired credentials.
func (c *Credentials) isExpired() bool {
return c.forceRefresh || c.provider.IsExpired()
}
// ExpiresAt provides access to the functionality of the Expirer interface of
// the underlying Provider, if it supports that interface. Otherwise, it returns
// an error.
func (c *Credentials) ExpiresAt() (time.Time, error) {
c.m.RLock()
defer c.m.RUnlock()
expirer, ok := c.provider.(Expirer)
if !ok {
return time.Time{}, awserr.New("ProviderNotExpirer",
fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName),
nil)
}
if c.forceRefresh {
// set expiration time to the distant past
return time.Time{}, nil
}
return expirer.ExpiresAt(), nil
}
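// Illustrative sketch (not part of the original source): querying the wrapped
// provider's expiration time. Providers that do not implement Expirer (for
// example static credentials) return the "ProviderNotExpirer" error.
//
//    creds := credentials.NewCredentials(&tokenProvider{}) // hypothetical provider
//    if _, err := creds.Get(); err == nil {
//        if at, err := creds.ExpiresAt(); err == nil {
//            fmt.Println("credentials valid until", at)
//        }
//    }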

@ -1,180 +0,0 @@
package ec2rolecreds
import (
"bufio"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkuri"
)
// ProviderName provides a name of EC2Role provider
const ProviderName = "EC2RoleProvider"
// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
//
// Example of how to configure the EC2RoleProvider with a custom http Client,
// Endpoint, or ExpiryWindow:
//
// p := &ec2rolecreds.EC2RoleProvider{
// // Pass in a custom timeout to be used when requesting
// // IAM EC2 Role credentials.
// Client: ec2metadata.New(sess, aws.Config{
// HTTPClient: &http.Client{Timeout: 10 * time.Second},
// }),
//
// // Do not use early expiry of credentials. If a non zero value is
// // specified the credentials will be expired early
// ExpiryWindow: 0,
// }
type EC2RoleProvider struct {
credentials.Expiry
// Required EC2Metadata client to use when connecting to EC2 metadata service.
Client *ec2metadata.EC2Metadata
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
// The ConfigProvider is satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
p := &EC2RoleProvider{
Client: ec2metadata.New(c),
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to the
// EC2 metadata service.
func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
p := &EC2RoleProvider{
Client: client,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// Retrieve retrieves credentials from the EC2 service.
// An error will be returned if the request fails, or the desired
// credentials cannot be extracted.
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
credsList, err := requestCredList(m.Client)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
if len(credsList) == 0 {
return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
}
credsName := credsList[0]
roleCreds, err := requestCred(m.Client, credsName)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
return credentials.Value{
AccessKeyID: roleCreds.AccessKeyID,
SecretAccessKey: roleCreds.SecretAccessKey,
SessionToken: roleCreds.Token,
ProviderName: ProviderName,
}, nil
}
// A ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses.
type ec2RoleCredRespBody struct {
// Success State
Expiration time.Time
AccessKeyID string
SecretAccessKey string
Token string
// Error state
Code string
Message string
}
const iamSecurityCredsPath = "iam/security-credentials/"
// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request,
// an error is returned.
func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
resp, err := client.GetMetadata(iamSecurityCredsPath)
if err != nil {
return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
}
credsList := []string{}
s := bufio.NewScanner(strings.NewReader(resp))
for s.Scan() {
credsList = append(credsList, s.Text())
}
if err := s.Err(); err != nil {
return nil, awserr.New(request.ErrCodeSerialization,
"failed to read EC2 instance role from metadata service", err)
}
return credsList, nil
}
// requestCred requests the credentials for a specific credentials name from the EC2 service.
//
// If the credentials cannot be found, or there is an error reading the response,
// an error will be returned.
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
if err != nil {
return ec2RoleCredRespBody{},
awserr.New("EC2RoleRequestError",
fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
err)
}
respCreds := ec2RoleCredRespBody{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
return ec2RoleCredRespBody{},
awserr.New(request.ErrCodeSerialization,
fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
err)
}
if respCreds.Code != "Success" {
// If an error code was returned something failed requesting the role.
return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
}
return respCreds, nil
}

@ -1,203 +0,0 @@
// Package endpointcreds provides support for retrieving credentials from an
// arbitrary HTTP endpoint.
//
// The credentials endpoint Provider can receive both static and refreshable
// credentials that will expire. Credentials are static when an "Expiration"
// value is not provided in the endpoint's response.
//
// Static credentials will never expire once they have been retrieved. The format
// of the static credentials response:
// {
// "AccessKeyId" : "MUA...",
// "SecretAccessKey" : "/7PC5om....",
// }
//
// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
// value in the response. The format of the refreshable credentials response:
// {
// "AccessKeyId" : "MUA...",
// "SecretAccessKey" : "/7PC5om....",
// "Token" : "AQoDY....=",
// "Expiration" : "2016-02-25T06:03:31Z"
// }
//
// Errors should be returned in the following format and only returned with 400
// or 500 HTTP status codes.
// {
// "code": "ErrorCode",
// "message": "Helpful error message."
// }
package endpointcreds
import (
"encoding/json"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)
// ProviderName is the name of the credentials provider.
const ProviderName = `CredentialsEndpointProvider`
// Provider satisfies the credentials.Provider interface, and is a client to
// retrieve credentials from an arbitrary endpoint.
type Provider struct {
staticCreds bool
credentials.Expiry
// Requires an AWS Client to make HTTP requests to the endpoint with.
// The endpoint the request will be made to is provided by the aws.Config's
// Endpoint value.
Client *client.Client
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// Optional authorization token value. If set, it will be used as the value of
// the Authorization header of the endpoint credential request.
AuthorizationToken string
}
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
// from an arbitrary endpoint.
func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
p := &Provider{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: "CredentialsEndpoint",
Endpoint: endpoint,
},
handlers,
),
}
p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
p.Client.Handlers.Validate.Clear()
p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
for _, option := range options {
option(p)
}
return p
}
// NewCredentialsClient returns a pointer to a new Credentials object
// wrapping the endpoint credentials Provider.
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
}
// IsExpired returns true if the credentials retrieved are expired, or not yet
// retrieved.
func (p *Provider) IsExpired() bool {
if p.staticCreds {
return false
}
return p.Expiry.IsExpired()
}
// Retrieve will attempt to request the credentials from the endpoint the Provider
// was configured for. An error will be returned if the retrieval fails.
func (p *Provider) Retrieve() (credentials.Value, error) {
resp, err := p.getCredentials()
if err != nil {
return credentials.Value{ProviderName: ProviderName},
awserr.New("CredentialsEndpointError", "failed to load credentials", err)
}
if resp.Expiration != nil {
p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
} else {
p.staticCreds = true
}
return credentials.Value{
AccessKeyID: resp.AccessKeyID,
SecretAccessKey: resp.SecretAccessKey,
SessionToken: resp.Token,
ProviderName: ProviderName,
}, nil
}
type getCredentialsOutput struct {
Expiration *time.Time
AccessKeyID string
SecretAccessKey string
Token string
}
type errorOutput struct {
Code string `json:"code"`
Message string `json:"message"`
}
func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
op := &request.Operation{
Name: "GetCredentials",
HTTPMethod: "GET",
}
out := &getCredentialsOutput{}
req := p.Client.NewRequest(op, nil, out)
req.HTTPRequest.Header.Set("Accept", "application/json")
if authToken := p.AuthorizationToken; len(authToken) != 0 {
req.HTTPRequest.Header.Set("Authorization", authToken)
}
return out, req.Send()
}
func validateEndpointHandler(r *request.Request) {
if len(r.ClientInfo.Endpoint) == 0 {
r.Error = aws.ErrMissingEndpoint
}
}
func unmarshalHandler(r *request.Request) {
defer r.HTTPResponse.Body.Close()
out := r.Data.(*getCredentialsOutput)
if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
r.Error = awserr.New(request.ErrCodeSerialization,
"failed to decode endpoint credentials",
err,
)
}
}
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
var errOut errorOutput
err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.NewRequestFailure(
awserr.New(request.ErrCodeSerialization,
"failed to decode error message", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
// Response body format is not consistent between metadata endpoints.
// Grab the error message as a string and include that as the source error
r.Error = awserr.New(errOut.Code, errOut.Message, nil)
}
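// Illustrative sketch (not part of the original source): wiring the endpoint
// provider up with the SDK's default config and handlers. The endpoint URL,
// token, and window are placeholders.
//
//    cfg := defaults.Config()        // github.com/aws/aws-sdk-go/aws/defaults
//    handlers := defaults.Handlers()
//    creds := endpointcreds.NewCredentialsClient(*cfg, handlers, "http://127.0.0.1:8080/creds",
//        func(p *endpointcreds.Provider) {
//            p.AuthorizationToken = "example-token"
//            p.ExpiryWindow = 30 * time.Second
//        })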

@ -1,74 +0,0 @@
package credentials
import (
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// EnvProviderName provides a name of Env provider
const EnvProviderName = "EnvProvider"
var (
// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
// found in the process's environment.
ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
// can't be found in the process's environment.
ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)
// An EnvProvider retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
//
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
type EnvProvider struct {
retrieved bool
}
// NewEnvCredentials returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvCredentials() *Credentials {
return NewCredentials(&EnvProvider{})
}
// Retrieve retrieves the keys from the environment.
func (e *EnvProvider) Retrieve() (Value, error) {
e.retrieved = false
id := os.Getenv("AWS_ACCESS_KEY_ID")
if id == "" {
id = os.Getenv("AWS_ACCESS_KEY")
}
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
if secret == "" {
secret = os.Getenv("AWS_SECRET_KEY")
}
if id == "" {
return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
}
if secret == "" {
return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
}
e.retrieved = true
return Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
ProviderName: EnvProviderName,
}, nil
}
// IsExpired returns if the credentials have been retrieved.
func (e *EnvProvider) IsExpired() bool {
return !e.retrieved
}

@ -1,12 +0,0 @@
[default]
aws_access_key_id = accessKey
aws_secret_access_key = secret
aws_session_token = token
[no_token]
aws_access_key_id = accessKey
aws_secret_access_key = secret
[with_colon]
aws_access_key_id: accessKey
aws_secret_access_key: secret

@ -1,425 +0,0 @@
/*
Package processcreds is a credential Provider to retrieve `credential_process`
credentials.
WARNING: The following describes a method of sourcing credentials from an external
process. This can potentially be dangerous, so proceed with caution. Other
credential providers should be preferred if at all possible. If using this
option, you should make sure that the config file is as locked down as possible
using security best practices for your operating system.
You can use credentials from a `credential_process` in a variety of ways.
One way is to setup your shared config file, located in the default
location, with the `credential_process` key and the command you want to be
called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
[default]
credential_process = /command/to/call
Creating a new session will use the credential process to retrieve credentials.
NOTE: If there are credentials in the profile you are using, the credential
process will not be used.
// Initialize a session to load credentials.
sess, _ := session.NewSession(&aws.Config{
Region: aws.String("us-east-1")},
)
// Create S3 service client to use the credentials.
svc := s3.New(sess)
Another way to use the `credential_process` method is by using
`processcreds.NewCredentials()` and providing a command to be executed to
retrieve credentials:
// Create credentials using the ProcessProvider.
creds := processcreds.NewCredentials("/path/to/command")
// Create service client value configured for credentials.
svc := s3.New(sess, &aws.Config{Credentials: creds})
You can set a non-default timeout for the `credential_process` with another
constructor, `processcreds.NewCredentialsTimeout()`, providing the timeout. For
example, to set a 500 millisecond timeout:
// Create credentials using the ProcessProvider.
creds := processcreds.NewCredentialsTimeout(
"/path/to/command",
time.Duration(500) * time.Millisecond)
If you need more control, you can set any configurable options in the
credentials using one or more option functions. For example, you can set a two
minute timeout, a credential duration of 60 minutes, and a maximum stdout
buffer size of 2k.
creds := processcreds.NewCredentials(
"/path/to/command",
func(opt *ProcessProvider) {
opt.Timeout = time.Duration(2) * time.Minute
opt.Duration = time.Duration(60) * time.Minute
opt.MaxBufSize = 2048
})
You can also use your own `exec.Cmd`:
// Create an exec.Cmd
myCommand := exec.Command("/path/to/command")
// Create credentials using your exec.Cmd and custom timeout
creds := processcreds.NewCredentialsCommand(
myCommand,
func(opt *processcreds.ProcessProvider) {
opt.Timeout = time.Duration(1) * time.Second
})
*/
package processcreds
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"runtime"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
)
const (
// ProviderName is the name this credentials provider will label any
// returned credentials Value with.
ProviderName = `ProcessProvider`
// ErrCodeProcessProviderParse error parsing process output
ErrCodeProcessProviderParse = "ProcessProviderParseError"
// ErrCodeProcessProviderVersion version error in output
ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
// ErrCodeProcessProviderRequired required attribute missing in output
ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
// ErrCodeProcessProviderExecution execution of command failed
ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
// errMsgProcessProviderTimeout process took longer than allowed
errMsgProcessProviderTimeout = "credential process timed out"
// errMsgProcessProviderProcess process error
errMsgProcessProviderProcess = "error in credential_process"
// errMsgProcessProviderParse problem parsing output
errMsgProcessProviderParse = "parse failed of credential_process output"
// errMsgProcessProviderVersion version error in output
errMsgProcessProviderVersion = "wrong version in process output (not 1)"
// errMsgProcessProviderMissKey missing access key id in output
errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
// errMsgProcessProviderMissSecret missing secret access key in output
errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
// errMsgProcessProviderPrepareCmd prepare of command failed
errMsgProcessProviderPrepareCmd = "failed to prepare command"
// errMsgProcessProviderEmptyCmd command must not be empty
errMsgProcessProviderEmptyCmd = "command must not be empty"
// errMsgProcessProviderPipe failed to initialize pipe
errMsgProcessProviderPipe = "failed to initialize pipe"
// DefaultDuration is the default amount of time in minutes that the
// credentials will be valid for.
DefaultDuration = time.Duration(15) * time.Minute
// DefaultBufSize limits buffer size from growing to an enormous
// amount due to a faulty process.
DefaultBufSize = 1024
// DefaultTimeout default limit on time a process can run.
DefaultTimeout = time.Duration(1) * time.Minute
)
// ProcessProvider satisfies the credentials.Provider interface, and is a
// client to retrieve credentials from a process.
type ProcessProvider struct {
staticCreds bool
credentials.Expiry
originalCommand []string
// Expiry duration of the credentials. Defaults to 15 minutes if not set.
Duration time.Duration
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// The command to be executed on the OS that should return JSON with
// credential information.
command *exec.Cmd
// MaxBufSize limits memory usage from growing to an enormous
// amount due to a faulty process.
MaxBufSize int
// Timeout limits the time a process can run.
Timeout time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping the
// ProcessProvider. The credentials will expire every 15 minutes by default.
func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
p := &ProcessProvider{
command: exec.Command(command),
Duration: DefaultDuration,
Timeout: DefaultTimeout,
MaxBufSize: DefaultBufSize,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsTimeout returns a pointer to a new Credentials object with
// the specified command and timeout, and default duration and max buffer size.
func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
p := NewCredentials(command, func(opt *ProcessProvider) {
opt.Timeout = timeout
})
return p
}
// NewCredentialsCommand returns a pointer to a new Credentials object with
// the specified command, and default timeout, duration and max buffer size.
func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
p := &ProcessProvider{
command: command,
Duration: DefaultDuration,
Timeout: DefaultTimeout,
MaxBufSize: DefaultBufSize,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
type credentialProcessResponse struct {
Version int
AccessKeyID string `json:"AccessKeyId"`
SecretAccessKey string
SessionToken string
Expiration *time.Time
}
// Retrieve executes the 'credential_process' and returns the credentials.
func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
out, err := p.executeCredentialProcess()
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
// Deserialize and validate the response
resp := &credentialProcessResponse{}
if err = json.Unmarshal(out, resp); err != nil {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderParse,
fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
err)
}
if resp.Version != 1 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderVersion,
errMsgProcessProviderVersion,
nil)
}
if len(resp.AccessKeyID) == 0 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderRequired,
errMsgProcessProviderMissKey,
nil)
}
if len(resp.SecretAccessKey) == 0 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderRequired,
errMsgProcessProviderMissSecret,
nil)
}
// Handle expiration
p.staticCreds = resp.Expiration == nil
if resp.Expiration != nil {
p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
}
return credentials.Value{
ProviderName: ProviderName,
AccessKeyID: resp.AccessKeyID,
SecretAccessKey: resp.SecretAccessKey,
SessionToken: resp.SessionToken,
}, nil
}
// IsExpired returns true if the credentials retrieved are expired, or not yet
// retrieved.
func (p *ProcessProvider) IsExpired() bool {
if p.staticCreds {
return false
}
return p.Expiry.IsExpired()
}
// prepareCommand prepares the command to be executed.
func (p *ProcessProvider) prepareCommand() error {
var cmdArgs []string
if runtime.GOOS == "windows" {
cmdArgs = []string{"cmd.exe", "/C"}
} else {
cmdArgs = []string{"sh", "-c"}
}
if len(p.originalCommand) == 0 {
p.originalCommand = make([]string, len(p.command.Args))
copy(p.originalCommand, p.command.Args)
// check for an empty command because executing an empty string would otherwise succeed
if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
return awserr.New(
ErrCodeProcessProviderExecution,
fmt.Sprintf(
"%s: %s",
errMsgProcessProviderPrepareCmd,
errMsgProcessProviderEmptyCmd),
nil)
}
}
cmdArgs = append(cmdArgs, p.originalCommand...)
p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
p.command.Env = os.Environ()
return nil
}
// executeCredentialProcess starts the credential process on the OS and
// returns the results or an error.
func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
if err := p.prepareCommand(); err != nil {
return nil, err
}
// Setup the pipes
outReadPipe, outWritePipe, err := os.Pipe()
if err != nil {
return nil, awserr.New(
ErrCodeProcessProviderExecution,
errMsgProcessProviderPipe,
err)
}
p.command.Stderr = os.Stderr // display stderr on console for MFA
p.command.Stdout = outWritePipe // get creds json on process's stdout
p.command.Stdin = os.Stdin // enable stdin for MFA
output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
stdoutCh := make(chan error, 1)
go readInput(
io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
output,
stdoutCh)
execCh := make(chan error, 1)
go executeCommand(*p.command, execCh)
finished := false
var errors []error
for !finished {
select {
case readError := <-stdoutCh:
errors = appendError(errors, readError)
finished = true
case execError := <-execCh:
err := outWritePipe.Close()
errors = appendError(errors, err)
errors = appendError(errors, execError)
if errors != nil {
return output.Bytes(), awserr.NewBatchError(
ErrCodeProcessProviderExecution,
errMsgProcessProviderProcess,
errors)
}
case <-time.After(p.Timeout):
finished = true
return output.Bytes(), awserr.NewBatchError(
ErrCodeProcessProviderExecution,
errMsgProcessProviderTimeout,
errors) // errors can be nil
}
}
out := output.Bytes()
if runtime.GOOS == "windows" {
// windows adds slashes to quotes
out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
}
return out, nil
}
// appendError conveniently checks for nil before appending to the slice
func appendError(errors []error, err error) []error {
if err != nil {
return append(errors, err)
}
return errors
}
func executeCommand(cmd exec.Cmd, exec chan error) {
// Start the command
err := cmd.Start()
if err == nil {
err = cmd.Wait()
}
exec <- err
}
func readInput(r io.Reader, w io.Writer, read chan error) {
tee := io.TeeReader(r, w)
_, err := ioutil.ReadAll(tee)
if err == io.EOF {
err = nil
}
read <- err // will only arrive here when write end of pipe is closed
}

@ -1,150 +0,0 @@
package credentials
import (
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/internal/ini"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
// SharedCredsProviderName provides a name of SharedCreds provider
const SharedCredsProviderName = "SharedCredentialsProvider"
var (
// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
)
// A SharedCredentialsProvider retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type SharedCredentialsProvider struct {
// Path to the shared credentials file.
//
// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
// env value is empty will default to current user's home directory.
// Linux/OSX: "$HOME/.aws/credentials"
// Windows: "%USERPROFILE%\.aws\credentials"
Filename string
// AWS Profile to extract credentials from the shared credentials file. If empty
// will default to environment variable "AWS_PROFILE" or "default" if
// environment variable is also not set.
Profile string
// retrieved states if the credentials have been successfully retrieved.
retrieved bool
}
// NewSharedCredentials returns a pointer to a new Credentials object
// wrapping the Profile file provider.
func NewSharedCredentials(filename, profile string) *Credentials {
return NewCredentials(&SharedCredentialsProvider{
Filename: filename,
Profile: profile,
})
}
// Retrieve reads and extracts the shared credentials from the current
// users home directory.
func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
p.retrieved = false
filename, err := p.filename()
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, err
}
creds, err := loadProfile(filename, p.profile())
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, err
}
p.retrieved = true
return creds, nil
}
// IsExpired returns if the shared credentials have expired.
func (p *SharedCredentialsProvider) IsExpired() bool {
return !p.retrieved
}
// loadProfile loads the named profile from the file pointed to by the shared
// credentials filename. The credentials retrieved from the profile will be returned,
// or an error if it fails to read from the file or the data is invalid.
func loadProfile(filename, profile string) (Value, error) {
config, err := ini.OpenFile(filename)
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
}
iniProfile, ok := config.GetSection(profile)
if !ok {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
}
id := iniProfile.String("aws_access_key_id")
if len(id) == 0 {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
nil)
}
secret := iniProfile.String("aws_secret_access_key")
if len(secret) == 0 {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
nil)
}
// Default to empty string if not found
token := iniProfile.String("aws_session_token")
return Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: token,
ProviderName: SharedCredsProviderName,
}, nil
}
// filename returns the filename to use to read AWS shared credentials.
//
// Will return an error if the user's home directory path cannot be found.
func (p *SharedCredentialsProvider) filename() (string, error) {
if len(p.Filename) != 0 {
return p.Filename, nil
}
if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
return p.Filename, nil
}
if home := shareddefaults.UserHomeDir(); len(home) == 0 {
// Backwards compatibility of the home directory not found error being returned.
// This error is too verbose; a failure when opening the file would have been
// a better error to return.
return "", ErrSharedCredentialsHomeNotFound
}
p.Filename = shareddefaults.SharedCredentialsFilename()
return p.Filename, nil
}
// profile returns the AWS shared credentials profile. If empty will read
// environment variable "AWS_PROFILE". If that is not set profile will
// return "default".
func (p *SharedCredentialsProvider) profile() string {
if p.Profile == "" {
p.Profile = os.Getenv("AWS_PROFILE")
}
if p.Profile == "" {
p.Profile = "default"
}
return p.Profile
}
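// Illustrative sketch (not part of the original source): loading a named
// profile from an explicit credentials file and using it with a service
// client. The path and profile name are placeholders.
//
//    creds := credentials.NewSharedCredentials("/etc/myapp/aws-credentials", "ci")
//    svc := s3.New(session.Must(session.NewSession(&aws.Config{
//        Credentials: creds,
//    })))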

@ -1,55 +0,0 @@
package credentials
import (
"github.com/aws/aws-sdk-go/aws/awserr"
)
// StaticProviderName provides a name of Static provider
const StaticProviderName = "StaticProvider"
var (
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
)
// A StaticProvider is a set of credentials which are set programmatically,
// and will never expire.
type StaticProvider struct {
Value
}
// NewStaticCredentials returns a pointer to a new Credentials object
// wrapping a static credentials value provider.
func NewStaticCredentials(id, secret, token string) *Credentials {
return NewCredentials(&StaticProvider{Value: Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: token,
}})
}
// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
// wrapping the static credentials value provider. Same as NewStaticCredentials,
// but takes the creds Value instead of individual fields.
func NewStaticCredentialsFromCreds(creds Value) *Credentials {
return NewCredentials(&StaticProvider{Value: creds})
}
// Retrieve returns the credentials or error if the credentials are invalid.
func (s *StaticProvider) Retrieve() (Value, error) {
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
}
if len(s.Value.ProviderName) == 0 {
s.Value.ProviderName = StaticProviderName
}
return s.Value, nil
}
// IsExpired returns if the credentials are expired.
//
// For StaticProvider, the credentials never expire.
func (s *StaticProvider) IsExpired() bool {
return false
}
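// Illustrative sketch (not part of the original source): static credentials
// are convenient for tests and local emulators; the key values here are
// placeholders and must never be real secrets.
//
//    creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
//    v, err := creds.Get() // Retrieve() is called once; the value never expires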

@ -1,312 +0,0 @@
/*
Package stscreds are credential Providers to retrieve STS AWS credentials.
STS provides multiple ways to retrieve credentials which can be used when making
future AWS service API operation calls.
The SDK will ensure that per instance of credentials.Credentials all requests
to refresh the credentials will be synchronized. But, the SDK is unable to
ensure synchronous usage of the AssumeRoleProvider if the value is shared
between multiple Credentials, Sessions or service clients.
Assume Role
To assume an IAM role using STS with the SDK you can create a new Credentials
with the SDKs's stscreds package.
// Initial credentials loaded from SDK's default credential chain. Such as
// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
// Role. These credentials will be used to make the STS Assume Role API call.
sess := session.Must(session.NewSession())
// Create the credentials from AssumeRoleProvider to assume the role
// referenced by the "myRoleARN" ARN.
creds := stscreds.NewCredentials(sess, "myRoleArn")
// Create service client value configured for credentials
// from assumed role.
svc := s3.New(sess, &aws.Config{Credentials: creds})
Assume Role with static MFA Token
To assume an IAM role with a MFA token you can either specify a MFA token code
directly or provide a function to prompt the user each time the credentials
need to refresh the role's credentials. Specifying the TokenCode should be used
for short lived operations that will not need to be refreshed, and when you do
not need direct control over how the user provides their MFA token.
With TokenCode the AssumeRoleProvider will not be able to refresh the role's
credentials.
// Create the credentials from AssumeRoleProvider to assume the role
// referenced by the "myRoleARN" ARN using the MFA token code provided.
creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
p.SerialNumber = aws.String("myTokenSerialNumber")
p.TokenCode = aws.String("00000000")
})
// Create service client value configured for credentials
// from assumed role.
svc := s3.New(sess, &aws.Config{Credentials: creds})
Assume Role with MFA Token Provider
To assume an IAM role with MFA for longer running tasks where the credentials
may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
will allow the credential provider to prompt for a new MFA token code when the
role's credentials need to be refreshed.
The StdinTokenProvider function is available to prompt on stdin to retrieve
the MFA token code from the user. You can also implement custom prompts by
satisfying the TokenProvider function signature.
Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
have undesirable results as the StdinTokenProvider will not be synchronized. A
single Credentials with an AssumeRoleProvider can be shared safely.
// Create the credentials from AssumeRoleProvider to assume the role
// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
p.SerialNumber = aws.String("myTokenSerialNumber")
p.TokenProvider = stscreds.StdinTokenProvider
})
// Create service client value configured for credentials
// from assumed role.
svc := s3.New(sess, &aws.Config{Credentials: creds})
*/
package stscreds
import (
"fmt"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/internal/sdkrand"
"github.com/aws/aws-sdk-go/service/sts"
)
// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
// An error is returned if reading from stdin fails.
//
// Use this function to read MFA tokens from stdin. The function makes no attempt
// to make prompts from stdin atomic across multiple goroutines.
//
// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
// have undesirable results as the StdinTokenProvider will not be synchronized. A
// single Credentials with an AssumeRoleProvider can be shared safely
//
// Will wait forever until something is provided on stdin.
func StdinTokenProvider() (string, error) {
var v string
fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
_, err := fmt.Scanln(&v)
return v, err
}
// ProviderName provides a name of AssumeRole provider
const ProviderName = "AssumeRoleProvider"
// AssumeRoler represents the minimal subset of the STS client API used by this provider.
type AssumeRoler interface {
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
}
// DefaultDuration is the default amount of time in minutes that the credentials
// will be valid for.
var DefaultDuration = time.Duration(15) * time.Minute
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
// keeps track of their expiration time.
//
// This credential provider will be used by the SDK's default credential chain
// when shared configuration is enabled, and the shared config or shared credentials
// file configure assume role. See Session docs for how to do this.
//
// AssumeRoleProvider does not provide any synchronization and it is not safe
// to share this value across multiple Credentials, Sessions, or service clients
// without also sharing the same Credentials instance.
type AssumeRoleProvider struct {
credentials.Expiry
// STS client to make assume role request with.
Client AssumeRoler
// Role to be assumed.
RoleARN string
// Session name, if you wish to reuse the credentials elsewhere.
RoleSessionName string
// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
Duration time.Duration
// Optional ExternalID to pass along, defaults to nil if not set.
ExternalID *string
// The policy plain text must be 2048 bytes or shorter. However, an internal
// conversion compresses it into a packed binary format with a separate limit.
// The PackedPolicySize response element indicates by percentage how close to
// the upper size limit the policy is, with 100% equaling the maximum allowed
// size.
Policy *string
// The identification number of the MFA device that is associated with the user
// who is making the AssumeRole call. Specify this value if the trust policy
// of the role being assumed includes a condition that requires MFA authentication.
// The value is either the serial number for a hardware device (such as GAHT12345678)
// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
SerialNumber *string
// The value provided by the MFA device, if the trust policy of the role being
// assumed requires MFA (that is, if the policy includes a condition that tests
// for MFA). If the role being assumed requires MFA and if the TokenCode value
// is missing or expired, the AssumeRole call returns an "access denied" error.
//
// If SerialNumber is set and neither TokenCode nor TokenProvider are also
// set an error will be returned.
TokenCode *string
// Async method of providing MFA token code for assuming an IAM role with MFA.
// The value returned by the function will be used as the TokenCode in the Retrieve
// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
//
// This token provider will be called whenever the assumed role's
// credentials need to be refreshed when SerialNumber is also set and
// TokenCode is not set.
//
// If both TokenCode and TokenProvider are set, TokenProvider will be used and
// TokenCode is ignored.
TokenProvider func() (string, error)
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// MaxJitterFrac reduces the effective Duration of each credential requested
// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
// have a value between 0 and 1. Any other value may lead to unexpected behavior.
// With the default MaxJitterFrac value of 0, no jitter will be used.
//
// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
// AssumeRole call will be made with an arbitrary Duration between 27m and
// 30m.
//
// MaxJitterFrac should not be negative.
MaxJitterFrac float64
}
// NewCredentials returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.
//
// It is safe to share the returned Credentials with multiple Sessions and
// service clients. All access to the credentials and refreshing them
// will be synchronized.
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: sts.New(c),
RoleARN: roleARN,
Duration: DefaultDuration,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes an AssumeRoler which can be satisfied by the STS client.
//
// It is safe to share the returned Credentials with multiple Sessions and
// service clients. All access to the credentials and refreshing them
// will be synchronized.
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: svc,
RoleARN: roleARN,
Duration: DefaultDuration,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
// Apply defaults where parameters are not set.
if p.RoleSessionName == "" {
// Try to work out a role name that will hopefully end up unique.
p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
}
if p.Duration == 0 {
// Expire as often as AWS permits.
p.Duration = DefaultDuration
}
jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
input := &sts.AssumeRoleInput{
DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
RoleArn: aws.String(p.RoleARN),
RoleSessionName: aws.String(p.RoleSessionName),
ExternalId: p.ExternalID,
}
if p.Policy != nil {
input.Policy = p.Policy
}
if p.SerialNumber != nil {
if p.TokenCode != nil {
input.SerialNumber = p.SerialNumber
input.TokenCode = p.TokenCode
} else if p.TokenProvider != nil {
input.SerialNumber = p.SerialNumber
code, err := p.TokenProvider()
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
input.TokenCode = aws.String(code)
} else {
return credentials.Value{ProviderName: ProviderName},
awserr.New("AssumeRoleTokenNotAvailable",
"assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
}
}
roleOutput, err := p.Client.AssumeRole(input)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
// We will proactively generate new credentials before they expire.
p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
return credentials.Value{
AccessKeyID: *roleOutput.Credentials.AccessKeyId,
SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
SessionToken: *roleOutput.Credentials.SessionToken,
ProviderName: ProviderName,
}, nil
}
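
A minimal sketch of the functional-options pattern accepted by NewCredentials, assuming a hypothetical role ARN; the option function tunes the provider fields documented above before the first Retrieve call:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())

	// Hypothetical role ARN; Duration, ExpiryWindow, and RoleSessionName map
	// directly to the AssumeRoleProvider fields described above.
	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/demo",
		func(p *stscreds.AssumeRoleProvider) {
			p.Duration = 30 * time.Minute
			p.ExpiryWindow = 10 * time.Second
			p.RoleSessionName = "example-session"
		})

	_ = s3.New(sess, &aws.Config{Credentials: creds})
}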

@ -1,100 +0,0 @@
package stscreds
import (
"fmt"
"io/ioutil"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/aws/aws-sdk-go/service/sts/stsiface"
)
const (
// ErrCodeWebIdentity will be used as an error code when constructing
// a new error to be returned during session creation or retrieval.
ErrCodeWebIdentity = "WebIdentityErr"
// WebIdentityProviderName is the web identity provider name
WebIdentityProviderName = "WebIdentityCredentials"
)
// now is used to return a time.Time object representing
// the current time. This can be used to easily test and
// compare test values.
var now = time.Now
// WebIdentityRoleProvider is used to retrieve credentials using
// an OIDC token.
type WebIdentityRoleProvider struct {
credentials.Expiry
client stsiface.STSAPI
ExpiryWindow time.Duration
tokenFilePath string
roleARN string
roleSessionName string
}
// NewWebIdentityCredentials will return a new set of credentials with a given
// configuration, role arn, and token file path.
func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
svc := sts.New(c)
p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
return credentials.NewCredentials(p)
}
// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
// provided stsiface.STSAPI
func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
return &WebIdentityRoleProvider{
client: svc,
tokenFilePath: path,
roleARN: roleARN,
roleSessionName: roleSessionName,
}
}
// Retrieve will attempt to assume a role using the web identity token located
// at the provider's configured token file path. If the token file cannot be
// read an error will be returned.
func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
b, err := ioutil.ReadFile(p.tokenFilePath)
if err != nil {
errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath)
return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err)
}
sessionName := p.roleSessionName
if len(sessionName) == 0 {
// session name is used to uniquely identify a session. This simply
// uses unix time in nanoseconds to uniquely identify sessions.
sessionName = strconv.FormatInt(now().UnixNano(), 10)
}
req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
RoleArn: &p.roleARN,
RoleSessionName: &sessionName,
WebIdentityToken: aws.String(string(b)),
})
// InvalidIdentityToken error is a temporary error that can occur
// when assuming a role with a JWT web identity token.
req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
if err := req.Send(); err != nil {
return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
}
p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
value := credentials.Value{
AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId),
SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
SessionToken: aws.StringValue(resp.Credentials.SessionToken),
ProviderName: WebIdentityProviderName,
}
return value, nil
}
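
A minimal sketch of NewWebIdentityCredentials, assuming a placeholder role ARN and a token path taken from the AWS_WEB_IDENTITY_TOKEN_FILE environment variable (a common convention, not something this provider requires):

package main

import (
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())

	// Placeholder role ARN; the token path is read from an environment
	// variable here purely for illustration.
	creds := stscreds.NewWebIdentityCredentials(sess,
		"arn:aws:iam::123456789012:role/demo",
		"example-session",
		os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))

	_ = s3.New(sess, &aws.Config{Credentials: creds})
}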

@ -1,69 +0,0 @@
// Package csm provides the Client Side Monitoring (CSM) client which enables
// sending metrics via UDP connection to the CSM agent. This package provides
// control options, and configuration for the CSM client. The client can be
// controlled manually, or automatically via the SDK's Session configuration.
//
// Enabling CSM client via SDK's Session configuration
//
// The CSM client can be enabled automatically via SDK's Session configuration.
// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
// environment variable is set to a non-empty value.
//
// The configuration options for the CSM client via the SDK's session
// configuration are:
//
// * AWS_CSM_PORT=<port number>
// The port number the CSM agent will receive metrics on.
//
// * AWS_CSM_HOST=<hostname or ip>
// The hostname, or IP address the CSM agent will receive metrics on.
// Without port number.
//
// Manually enabling the CSM client
//
// The CSM client can be started, paused, and resumed manually. The Start
// function will enable the CSM client to publish metrics to the CSM agent. It
// is safe to call Start concurrently, but if Start is called additional times
// with a different ClientID or address it will panic.
//
// r, err := csm.Start("clientID", ":31000")
// if err != nil {
// panic(fmt.Errorf("failed starting CSM: %v", err))
// }
//
// When controlling the CSM client manually, you must also inject its request
// handlers into the SDK's Session configuration for the SDK's API clients to
// publish metrics.
//
// sess, err := session.NewSession(&aws.Config{})
// if err != nil {
// panic(fmt.Errorf("failed loading session: %v", err))
// }
//
// // Add CSM client's metric publishing request handlers to the SDK's
// // Session Configuration.
// r.InjectHandlers(&sess.Handlers)
//
// Controlling CSM client
//
// Once the CSM client has been enabled the Get function will return a Reporter
// value that you can use to pause and resume the metrics published to the CSM
// agent. If the Get function is called before the reporter is enabled with the
// Start function or via the SDK's Session configuration, nil will be returned.
//
// The Pause method can be called to stop the CSM client publishing metrics to
// the CSM agent. The Continue method will resume metric publishing.
//
// // Get the CSM client Reporter.
// r := csm.Get()
//
// // Will pause monitoring
// r.Pause()
// resp, err = client.GetObject(&s3.GetObjectInput{
// Bucket: aws.String("bucket"),
// Key: aws.String("key"),
// })
//
// // Resume monitoring
// r.Continue()
package csm
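
The snippets in the package comment above can be combined into one runnable sketch, assuming a CSM agent is listening locally on the default port:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Assumes a CSM agent listening locally on the default port.
	r, err := csm.Start("clientID", "127.0.0.1:31000")
	if err != nil {
		panic(fmt.Errorf("failed starting CSM: %v", err))
	}

	sess := session.Must(session.NewSession())
	r.InjectHandlers(&sess.Handlers)

	// Clients built from this session now publish call metrics.
	svc := s3.New(sess)
	_ = svc

	// Publishing can be paused and resumed at any time.
	r.Pause()
	r.Continue()
}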

@ -1,89 +0,0 @@
package csm
import (
"fmt"
"strings"
"sync"
)
var (
lock sync.Mutex
)
const (
// DefaultPort is used when no port is specified.
DefaultPort = "31000"
// DefaultHost is the host that will be used when none is specified.
DefaultHost = "127.0.0.1"
)
// AddressWithDefaults returns a CSM address built from the host and port
// values. If the host or port is not set, default values will be used
// instead. If host is "localhost" it will be replaced with "127.0.0.1".
func AddressWithDefaults(host, port string) string {
if len(host) == 0 || strings.EqualFold(host, "localhost") {
host = DefaultHost
}
if len(port) == 0 {
port = DefaultPort
}
// Only IP6 host can contain a colon
if strings.Contains(host, ":") {
return "[" + host + "]:" + port
}
return host + ":" + port
}
// Start will start a long running goroutine to capture
// client side metrics. Calling Start multiple times will only
// start the metric listener once and will panic if a different
// client ID or port is passed in.
//
// r, err := csm.Start("clientID", "127.0.0.1:31000")
// if err != nil {
// panic(fmt.Errorf("expected no error, but received %v", err))
// }
// sess := session.NewSession()
// r.InjectHandlers(sess.Handlers)
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput{
// Bucket: aws.String("bucket"),
// Key: aws.String("key"),
// })
func Start(clientID string, url string) (*Reporter, error) {
lock.Lock()
defer lock.Unlock()
if sender == nil {
sender = newReporter(clientID, url)
} else {
if sender.clientID != clientID {
panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
}
if sender.url != url {
panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
}
}
if err := connect(url); err != nil {
sender = nil
return nil, err
}
return sender, nil
}
// Get will return a reporter if one exists, if one does not exist, nil will
// be returned.
func Get() *Reporter {
lock.Lock()
defer lock.Unlock()
return sender
}
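
The address normalization performed by AddressWithDefaults can be illustrated directly; the expected outputs follow from DefaultHost, DefaultPort, and the IPv6 bracketing rule above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/csm"
)

func main() {
	// Empty host and port fall back to DefaultHost and DefaultPort.
	fmt.Println(csm.AddressWithDefaults("", "")) // 127.0.0.1:31000
	// "localhost" is rewritten to 127.0.0.1.
	fmt.Println(csm.AddressWithDefaults("localhost", "")) // 127.0.0.1:31000
	// Only an IPv6 host contains a colon, so it gets bracketed.
	fmt.Println(csm.AddressWithDefaults("::1", "31000")) // [::1]:31000
}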

@ -1,109 +0,0 @@
package csm
import (
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
)
type metricTime time.Time
func (t metricTime) MarshalJSON() ([]byte, error) {
ns := time.Duration(time.Time(t).UnixNano())
return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
}
type metric struct {
ClientID *string `json:"ClientId,omitempty"`
API *string `json:"Api,omitempty"`
Service *string `json:"Service,omitempty"`
Timestamp *metricTime `json:"Timestamp,omitempty"`
Type *string `json:"Type,omitempty"`
Version *int `json:"Version,omitempty"`
AttemptCount *int `json:"AttemptCount,omitempty"`
Latency *int `json:"Latency,omitempty"`
Fqdn *string `json:"Fqdn,omitempty"`
UserAgent *string `json:"UserAgent,omitempty"`
AttemptLatency *int `json:"AttemptLatency,omitempty"`
SessionToken *string `json:"SessionToken,omitempty"`
Region *string `json:"Region,omitempty"`
AccessKey *string `json:"AccessKey,omitempty"`
HTTPStatusCode *int `json:"HttpStatusCode,omitempty"`
XAmzID2 *string `json:"XAmzId2,omitempty"`
XAmzRequestID *string `json:"XAmznRequestId,omitempty"`
AWSException *string `json:"AwsException,omitempty"`
AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
SDKException *string `json:"SdkException,omitempty"`
SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"`
FinalAWSException *string `json:"FinalAwsException,omitempty"`
FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
FinalSDKException *string `json:"FinalSdkException,omitempty"`
FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
DestinationIP *string `json:"DestinationIp,omitempty"`
ConnectionReused *int `json:"ConnectionReused,omitempty"`
AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
ConnectLatency *int `json:"ConnectLatency,omitempty"`
RequestLatency *int `json:"RequestLatency,omitempty"`
DNSLatency *int `json:"DnsLatency,omitempty"`
TCPLatency *int `json:"TcpLatency,omitempty"`
SSLLatency *int `json:"SslLatency,omitempty"`
MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
}
func (m *metric) TruncateFields() {
m.ClientID = truncateString(m.ClientID, 255)
m.UserAgent = truncateString(m.UserAgent, 256)
m.AWSException = truncateString(m.AWSException, 128)
m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
m.SDKException = truncateString(m.SDKException, 128)
m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
m.FinalAWSException = truncateString(m.FinalAWSException, 128)
m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
m.FinalSDKException = truncateString(m.FinalSDKException, 128)
m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
}
func truncateString(v *string, l int) *string {
if v != nil && len(*v) > l {
nv := (*v)[:l]
return &nv
}
return v
}
func (m *metric) SetException(e metricException) {
switch te := e.(type) {
case awsException:
m.AWSException = aws.String(te.exception)
m.AWSExceptionMessage = aws.String(te.message)
case sdkException:
m.SDKException = aws.String(te.exception)
m.SDKExceptionMessage = aws.String(te.message)
}
}
func (m *metric) SetFinalException(e metricException) {
switch te := e.(type) {
case awsException:
m.FinalAWSException = aws.String(te.exception)
m.FinalAWSExceptionMessage = aws.String(te.message)
case sdkException:
m.FinalSDKException = aws.String(te.exception)
m.FinalSDKExceptionMessage = aws.String(te.message)
}
}

@ -1,55 +0,0 @@
package csm
import (
"sync/atomic"
)
const (
runningEnum = iota
pausedEnum
)
var (
// MetricsChannelSize of metrics to hold in the channel
MetricsChannelSize = 100
)
type metricChan struct {
ch chan metric
paused *int64
}
func newMetricChan(size int) metricChan {
return metricChan{
ch: make(chan metric, size),
paused: new(int64),
}
}
func (ch *metricChan) Pause() {
atomic.StoreInt64(ch.paused, pausedEnum)
}
func (ch *metricChan) Continue() {
atomic.StoreInt64(ch.paused, runningEnum)
}
func (ch *metricChan) IsPaused() bool {
v := atomic.LoadInt64(ch.paused)
return v == pausedEnum
}
// Push will push metrics to the metric channel if the channel
// is not paused
func (ch *metricChan) Push(m metric) bool {
if ch.IsPaused() {
return false
}
select {
case ch.ch <- m:
return true
default:
return false
}
}

@ -1,26 +0,0 @@
package csm
type metricException interface {
Exception() string
Message() string
}
type requestException struct {
exception string
message string
}
func (e requestException) Exception() string {
return e.exception
}
func (e requestException) Message() string {
return e.message
}
type awsException struct {
requestException
}
type sdkException struct {
requestException
}

@ -1,265 +0,0 @@
package csm
import (
"encoding/json"
"net"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// Reporter will gather metrics of API requests made and
// send those metrics to the CSM endpoint.
type Reporter struct {
clientID string
url string
conn net.Conn
metricsCh metricChan
done chan struct{}
}
var (
sender *Reporter
)
func connect(url string) error {
const network = "udp"
if err := sender.connect(network, url); err != nil {
return err
}
if sender.done == nil {
sender.done = make(chan struct{})
go sender.start()
}
return nil
}
func newReporter(clientID, url string) *Reporter {
return &Reporter{
clientID: clientID,
url: url,
metricsCh: newMetricChan(MetricsChannelSize),
}
}
func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
if rep == nil {
return
}
now := time.Now()
creds, _ := r.Config.Credentials.Get()
m := metric{
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
Region: r.Config.Region,
Type: aws.String("ApiCallAttempt"),
Version: aws.Int(1),
XAmzRequestID: aws.String(r.RequestID),
AttemptCount: aws.Int(r.RetryCount + 1),
AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
AccessKey: aws.String(creds.AccessKeyID),
}
if r.HTTPResponse != nil {
m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
}
if r.Error != nil {
if awserr, ok := r.Error.(awserr.Error); ok {
m.SetException(getMetricException(awserr))
}
}
m.TruncateFields()
rep.metricsCh.Push(m)
}
func getMetricException(err awserr.Error) metricException {
msg := err.Error()
code := err.Code()
switch code {
case "RequestError",
request.ErrCodeSerialization,
request.CanceledErrorCode:
return sdkException{
requestException{exception: code, message: msg},
}
default:
return awsException{
requestException{exception: code, message: msg},
}
}
}
func (rep *Reporter) sendAPICallMetric(r *request.Request) {
if rep == nil {
return
}
now := time.Now()
m := metric{
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Region: r.Config.Region,
Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
}
if r.HTTPResponse != nil {
m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
}
if r.Error != nil {
if awserr, ok := r.Error.(awserr.Error); ok {
m.SetFinalException(getMetricException(awserr))
}
}
m.TruncateFields()
// TODO: Probably want to figure something out for logging dropped
// metrics
rep.metricsCh.Push(m)
}
func (rep *Reporter) connect(network, url string) error {
if rep.conn != nil {
rep.conn.Close()
}
conn, err := net.Dial(network, url)
if err != nil {
return awserr.New("UDPError", "Could not connect", err)
}
rep.conn = conn
return nil
}
func (rep *Reporter) close() {
if rep.done != nil {
close(rep.done)
}
rep.metricsCh.Pause()
}
func (rep *Reporter) start() {
defer func() {
rep.metricsCh.Pause()
}()
for {
select {
case <-rep.done:
rep.done = nil
return
case m := <-rep.metricsCh.ch:
// TODO: What to do with this error? Probably should just log
b, err := json.Marshal(m)
if err != nil {
continue
}
rep.conn.Write(b)
}
}
}
// Pause will pause the metric channel preventing any new metrics from being
// added. It is safe to call concurrently with other calls to Pause, but if
// called concurrently with Continue it can lead to unexpected state.
func (rep *Reporter) Pause() {
lock.Lock()
defer lock.Unlock()
if rep == nil {
return
}
rep.close()
}
// Continue will reopen the metric channel and allow for monitoring to be
// resumed. It is safe to call concurrently with other calls to Continue, but
// if called concurrently with Pause it can lead to unexpected state.
func (rep *Reporter) Continue() {
lock.Lock()
defer lock.Unlock()
if rep == nil {
return
}
if !rep.metricsCh.IsPaused() {
return
}
rep.metricsCh.Continue()
}
// Client side metric handler names
const (
APICallMetricHandlerName = "awscsm.SendAPICallMetric"
APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
)
// InjectHandlers will enable client side metrics and inject the proper
// handlers to handle how metrics are sent.
//
// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
// multiple times may lead to unexpected behavior, (e.g. duplicate metrics).
//
// // Start must be called in order to inject the correct handlers
// r, err := csm.Start("clientID", "127.0.0.1:8094")
// if err != nil {
// panic(fmt.Errorf("expected no error, but received %v", err))
// }
//
// sess := session.NewSession()
// r.InjectHandlers(&sess.Handlers)
//
// // create a new service client with our client side metric session
// svc := s3.New(sess)
func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
if rep == nil {
return
}
handlers.Complete.PushFrontNamed(request.NamedHandler{
Name: APICallMetricHandlerName,
Fn: rep.sendAPICallMetric,
})
handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
Name: APICallAttemptMetricHandlerName,
Fn: rep.sendAPICallAttemptMetric,
})
}
// boolIntValue return 1 for true and 0 for false.
func boolIntValue(b bool) int {
if b {
return 1
}
return 0
}
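
A small sketch of pausing and resuming metric publishing through Get, which returns nil until the reporter has been enabled:

package main

import (
	"github.com/aws/aws-sdk-go/aws/csm"
)

func main() {
	// Get returns nil until the reporter has been enabled via Start or the
	// SDK's Session configuration.
	if r := csm.Get(); r != nil {
		r.Pause()          // stop pushing metrics to the agent
		defer r.Continue() // resume when this function returns
	}
}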

@ -1,207 +0,0 @@
// Package defaults is a collection of helpers to retrieve the SDK's default
// configuration and handlers.
//
// Generally this package shouldn't be used directly, but session.Session
// instead. This package is useful when you need to reset the defaults
// of a session or service client to the SDK defaults before setting
// additional parameters.
package defaults
import (
"fmt"
"net"
"net/http"
"net/url"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
// A Defaults provides a collection of default values for SDK clients.
type Defaults struct {
Config *aws.Config
Handlers request.Handlers
}
// Get returns the SDK's default values with Config and handlers pre-configured.
func Get() Defaults {
cfg := Config()
handlers := Handlers()
cfg.Credentials = CredChain(cfg, handlers)
return Defaults{
Config: cfg,
Handlers: handlers,
}
}
// Config returns the default configuration without credentials.
// To retrieve a config with credentials also included use
// `defaults.Get().Config` instead.
//
// Generally you shouldn't need to use this method directly, but it
// is available if you need to reset the configuration of an
// existing service client or session.
func Config() *aws.Config {
return aws.NewConfig().
WithCredentials(credentials.AnonymousCredentials).
WithRegion(os.Getenv("AWS_REGION")).
WithHTTPClient(http.DefaultClient).
WithMaxRetries(aws.UseServiceDefaultRetries).
WithLogger(aws.NewDefaultLogger()).
WithLogLevel(aws.LogOff).
WithEndpointResolver(endpoints.DefaultResolver())
}
// Handlers returns the default request handlers.
//
// Generally you shouldn't need to use this method directly, but it
// is available if you need to reset the request handlers of an
// existing service client or session.
func Handlers() request.Handlers {
var handlers request.Handlers
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
handlers.Validate.AfterEachFn = request.HandlerListStopOnError
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
handlers.Build.AfterEachFn = request.HandlerListStopOnError
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
handlers.Send.PushBackNamed(corehandlers.SendHandler)
handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
return handlers
}
// CredChain returns the default credential chain.
//
// Generally you shouldn't need to use this method directly, but it
// is available if you need to reset the credentials of an
// existing service client or session's Config.
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
return credentials.NewCredentials(&credentials.ChainProvider{
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
Providers: CredProviders(cfg, handlers),
})
}
// CredProviders returns the slice of providers used in
// the default credential chain.
//
// This is useful for applications that need to use some other provider (for
// example, different environment variables for legacy reasons) but still want
// to fall back on the default chain of providers. It allows that default
// chain to be updated automatically.
func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
return []credentials.Provider{
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
RemoteCredProvider(*cfg, handlers),
}
}
const (
httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
)
// RemoteCredProvider returns a credentials provider for the default remote
// endpoints such as EC2 or ECS Roles.
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
return localHTTPCredProvider(cfg, handlers, u)
}
if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
return httpCredProvider(cfg, handlers, u)
}
return ec2RoleProvider(cfg, handlers)
}
var lookupHostFn = net.LookupHost
func isLoopbackHost(host string) (bool, error) {
ip := net.ParseIP(host)
if ip != nil {
return ip.IsLoopback(), nil
}
// Host is not an ip, perform lookup
addrs, err := lookupHostFn(host)
if err != nil {
return false, err
}
for _, addr := range addrs {
if !net.ParseIP(addr).IsLoopback() {
return false, nil
}
}
return true, nil
}
func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
var errMsg string
parsed, err := url.Parse(u)
if err != nil {
errMsg = fmt.Sprintf("invalid URL, %v", err)
} else {
host := aws.URLHostname(parsed)
if len(host) == 0 {
errMsg = "unable to parse host from local HTTP cred provider URL"
} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
} else if !isLoopback {
errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
}
}
if len(errMsg) > 0 {
if cfg.Logger != nil {
cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
}
return credentials.ErrorProvider{
Err: awserr.New("CredentialsEndpointError", errMsg, err),
ProviderName: endpointcreds.ProviderName,
}
}
return httpCredProvider(cfg, handlers, u)
}
func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
return endpointcreds.NewProviderClient(cfg, handlers, u,
func(p *endpointcreds.Provider) {
p.ExpiryWindow = 5 * time.Minute
p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
},
)
}
func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
resolver := cfg.EndpointResolver
if resolver == nil {
resolver = endpoints.DefaultResolver()
}
e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
return &ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
ExpiryWindow: 5 * time.Minute,
}
}
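
A minimal sketch of reusing CredProviders to build a custom chain that falls back on the SDK defaults; StaticProvider stands in for a hypothetical custom Provider implementation:

package main

import (
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	def := defaults.Get()

	// StaticProvider is only a stand-in for a custom Provider implementation;
	// the point is prepending it to the SDK's default provider list.
	custom := &credentials.StaticProvider{}
	providers := append([]credentials.Provider{custom},
		defaults.CredProviders(def.Config, def.Handlers)...)

	def.Config.Credentials = credentials.NewChainCredentials(providers)
}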

@ -1,27 +0,0 @@
package defaults
import (
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
// SharedCredentialsFilename returns the SDK's default file path
// for the shared credentials file.
//
// Builds the shared config file path based on the OS's platform.
//
// - Linux/Unix: $HOME/.aws/credentials
// - Windows: %USERPROFILE%\.aws\credentials
func SharedCredentialsFilename() string {
return shareddefaults.SharedCredentialsFilename()
}
// SharedConfigFilename returns the SDK's default file path for
// the shared config file.
//
// Builds the shared config file path based on the OS's platform.
//
// - Linux/Unix: $HOME/.aws/config
// - Windows: %USERPROFILE%\.aws\config
func SharedConfigFilename() string {
return shareddefaults.SharedConfigFilename()
}
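
A trivial sketch that prints the resolved paths on the current platform:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Paths are resolved per platform: $HOME/.aws/... on Linux/Unix,
	// %USERPROFILE%\.aws\... on Windows.
	fmt.Println(defaults.SharedCredentialsFilename())
	fmt.Println(defaults.SharedConfigFilename())
}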

@ -1,56 +0,0 @@
// Package aws provides the core SDK's utilities and shared types. Use this package's
// utilities to simplify setting and reading API operations parameters.
//
// Value and Pointer Conversion Utilities
//
// This package includes a helper conversion utility for each scalar type the SDK's
// APIs use. These utilities make getting a pointer to a scalar, and dereferencing
// a pointer easier.
//
// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
// The Pointer to value will safely dereference the pointer and return its value.
// If the pointer was nil, the scalar's zero value will be returned.
//
// The value to pointer functions will be named after the scalar type. So to get
// a *string from a string value use the "String" function. This makes it easy
// to get a pointer to a literal string value, because getting the address of a
// literal requires assigning the value to a variable first.
//
// var strPtr *string
//
// // Without the SDK's conversion functions
// str := "my string"
// strPtr = &str
//
// // With the SDK's conversion functions
// strPtr = aws.String("my string")
//
// // Convert *string to string value
// str = aws.StringValue(strPtr)
//
// In addition to scalars the aws package also includes conversion utilities for
// the map and slice types commonly used in API parameters. The map and slice
// conversion functions use a similar naming pattern to the scalar conversion
// functions.
//
// var strPtrs []*string
// var strs []string = []string{"Go", "Gophers", "Go"}
//
// // Convert []string to []*string
// strPtrs = aws.StringSlice(strs)
//
// // Convert []*string to []string
// strs = aws.StringValueSlice(strPtrs)
//
// SDK Default HTTP Client
//
// The SDK will use the http.DefaultClient if an HTTP client is not provided to
// the SDK's Session, or service client constructor. This means that if the
// http.DefaultClient is modified by other components of your application the
// modifications will be picked up by the SDK as well.
//
// In some cases this might be intended, but it is a better practice to create
// a custom HTTP Client to share explicitly through your application. You can
// configure the SDK to use the custom HTTP Client by setting the HTTPClient
// value of the SDK's Config type when creating a Session or service client.
package aws
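
A short runnable sketch of the conversion helpers described above, including the nil-safe dereference behavior:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Pointer-to-value helpers dereference safely: a nil pointer yields the
	// scalar's zero value instead of panicking.
	var p *string
	fmt.Printf("%q\n", aws.StringValue(p)) // ""

	p = aws.String("gopher")
	fmt.Printf("%q\n", aws.StringValue(p)) // "gopher"

	// Slice helpers convert whole collections at once.
	ptrs := aws.StringSlice([]string{"Go", "Gophers"})
	fmt.Println(aws.StringValueSlice(ptrs)) // [Go Gophers]
}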

@ -1,170 +0,0 @@
package ec2metadata
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkuri"
)
// GetMetadata uses the path provided to request information from the EC2
// instance metadata service. The content will be returned as a string, or
// error if the request failed.
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
op := &request.Operation{
Name: "GetMetadata",
HTTPMethod: "GET",
HTTPPath: sdkuri.PathJoin("/meta-data", p),
}
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
err := req.Send()
return output.Content, err
}
// GetUserData returns the userdata that was configured for the EC2 instance. If
// there is no user-data set up for the EC2 instance a "NotFoundError" error
// code will be returned.
func (c *EC2Metadata) GetUserData() (string, error) {
op := &request.Operation{
Name: "GetUserData",
HTTPMethod: "GET",
HTTPPath: "/user-data",
}
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
if r.HTTPResponse.StatusCode == http.StatusNotFound {
r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
}
})
err := req.Send()
return output.Content, err
}
// GetDynamicData uses the path provided to request information from the EC2
// instance metadata service for dynamic data. The content will be returned
// as a string, or error if the request failed.
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
op := &request.Operation{
Name: "GetDynamicData",
HTTPMethod: "GET",
HTTPPath: sdkuri.PathJoin("/dynamic", p),
}
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
err := req.Send()
return output.Content, err
}
// GetInstanceIdentityDocument retrieves an identity document describing an
// instance. Error is returned if the request fails or is unable to parse
// the response.
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
resp, err := c.GetDynamicData("instance-identity/document")
if err != nil {
return EC2InstanceIdentityDocument{},
awserr.New("EC2MetadataRequestError",
"failed to get EC2 instance identity document", err)
}
doc := EC2InstanceIdentityDocument{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
return EC2InstanceIdentityDocument{},
awserr.New(request.ErrCodeSerialization,
"failed to decode EC2 instance identity document", err)
}
return doc, nil
}
// IAMInfo retrieves IAM info from the metadata API
func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
resp, err := c.GetMetadata("iam/info")
if err != nil {
return EC2IAMInfo{},
awserr.New("EC2MetadataRequestError",
"failed to get EC2 IAM info", err)
}
info := EC2IAMInfo{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
return EC2IAMInfo{},
awserr.New(request.ErrCodeSerialization,
"failed to decode EC2 IAM info", err)
}
if info.Code != "Success" {
errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
return EC2IAMInfo{},
awserr.New("EC2MetadataError", errMsg, nil)
}
return info, nil
}
// Region returns the region the instance is running in.
func (c *EC2Metadata) Region() (string, error) {
resp, err := c.GetMetadata("placement/availability-zone")
if err != nil {
return "", err
}
if len(resp) == 0 {
return "", awserr.New("EC2MetadataError", "invalid Region response", nil)
}
// returns region without the suffix. Eg: us-west-2a becomes us-west-2
return resp[:len(resp)-1], nil
}
// Available returns if the application has access to the EC2 Metadata service.
// Can be used to determine if the application is running within an EC2 Instance and
// the metadata service is available.
func (c *EC2Metadata) Available() bool {
if _, err := c.GetMetadata("instance-id"); err != nil {
return false
}
return true
}
// An EC2IAMInfo provides the shape for unmarshaling
// an IAM info from the metadata API
type EC2IAMInfo struct {
Code string
LastUpdated time.Time
InstanceProfileArn string
InstanceProfileID string
}
// An EC2InstanceIdentityDocument provides the shape for unmarshaling
// an instance identity document
type EC2InstanceIdentityDocument struct {
DevpayProductCodes []string `json:"devpayProductCodes"`
MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
AvailabilityZone string `json:"availabilityZone"`
PrivateIP string `json:"privateIp"`
Version string `json:"version"`
Region string `json:"region"`
InstanceID string `json:"instanceId"`
BillingProducts []string `json:"billingProducts"`
InstanceType string `json:"instanceType"`
AccountID string `json:"accountId"`
PendingTime time.Time `json:"pendingTime"`
ImageID string `json:"imageId"`
KernelID string `json:"kernelId"`
RamdiskID string `json:"ramdiskId"`
Architecture string `json:"architecture"`
}
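
A minimal sketch that queries a few of the metadata helpers above; it only returns data when run on an EC2 instance with IMDS reachable:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	// Available probes the metadata endpoint, so this only succeeds when
	// running on an EC2 instance with IMDS reachable.
	if !svc.Available() {
		fmt.Println("EC2 metadata service not available")
		return
	}

	region, err := svc.Region()
	if err != nil {
		panic(err)
	}
	doc, err := svc.GetInstanceIdentityDocument()
	if err != nil {
		panic(err)
	}
	fmt.Println(region, doc.InstanceID, doc.InstanceType)
}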

@ -1,152 +0,0 @@
// Package ec2metadata provides the client for making API calls to the
// EC2 Metadata service.
//
// This package's client can be disabled completely by setting the environment
// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
// true instructs the SDK to disable the EC2 Metadata client. The client cannot
// be used while the environment variable is set to true, (case insensitive).
package ec2metadata
import (
"bytes"
"errors"
"io"
"net/http"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/request"
)
// ServiceName is the name of the service.
const ServiceName = "ec2metadata"
const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
// An EC2Metadata is an EC2 Metadata service Client.
type EC2Metadata struct {
*client.Client
}
// New creates a new instance of the EC2Metadata client with a session.
// This client is safe to use across multiple goroutines.
//
//
// Example:
// // Create a EC2Metadata client from just a session.
// svc := ec2metadata.New(mySession)
//
// // Create a EC2Metadata client with additional configuration
// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
c := p.ClientConfig(ServiceName, cfgs...)
return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// NewClient returns a new EC2Metadata client. Should be used to create
// a client when not using a session. Generally using just New with a session
// is preferred.
//
// If an unmodified HTTP client from the stdlib default, or no client at all,
// is provided, the EC2Metadata HTTP client's timeout will be shortened.
// To disable this behavior set Config.EC2MetadataDisableTimeoutOverride to true. The override is enabled by default.
func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
// If the http client is unmodified and this feature is not disabled
// set custom timeouts for EC2Metadata requests.
cfg.HTTPClient = &http.Client{
// use a shorter timeout than default because the metadata
// service is local if it is running, and to fail faster
// if not running on an ec2 instance.
Timeout: 5 * time.Second,
}
}
svc := &EC2Metadata{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
ServiceID: ServiceName,
Endpoint: endpoint,
APIVersion: "latest",
},
handlers,
),
}
svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
svc.Handlers.UnmarshalError.PushBack(unmarshalError)
svc.Handlers.Validate.Clear()
svc.Handlers.Validate.PushBack(validateEndpointHandler)
// Disable the EC2 Metadata service if the environment variable is set.
// This short-circuits the service's functionality to always fail to send
// requests.
if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
svc.Handlers.Send.SwapNamed(request.NamedHandler{
Name: corehandlers.SendHandler.Name,
Fn: func(r *request.Request) {
r.HTTPResponse = &http.Response{
Header: http.Header{},
}
r.Error = awserr.New(
request.CanceledErrorCode,
"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
nil)
},
})
}
// Add additional options to the service config
for _, option := range opts {
option(svc.Client)
}
return svc
}
func httpClientZero(c *http.Client) bool {
return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
}
type metadataOutput struct {
Content string
}
func unmarshalHandler(r *request.Request) {
defer r.HTTPResponse.Body.Close()
b := &bytes.Buffer{}
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err)
return
}
if data, ok := r.Data.(*metadataOutput); ok {
data.Content = b.String()
}
}
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
b := &bytes.Buffer{}
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err)
return
}
// Response body format is not consistent between metadata endpoints.
// Grab the error message as a string and include that as the source error
r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
}
func validateEndpointHandler(r *request.Request) {
if r.ClientInfo.Endpoint == "" {
r.Error = aws.ErrMissingEndpoint
}
}
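
A small sketch of the AWS_EC2_METADATA_DISABLED behavior described above; the exact error text may vary by SDK version:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// With the variable set, the Send handler is swapped out and every
	// request fails fast instead of timing out against 169.254.169.254.
	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")

	svc := ec2metadata.New(session.Must(session.NewSession()))
	_, err := svc.GetMetadata("instance-id")
	fmt.Println(err) // RequestCanceled: EC2 IMDS access disabled ...
}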

@ -1,188 +0,0 @@
package endpoints
import (
"encoding/json"
"fmt"
"io"
"github.com/aws/aws-sdk-go/aws/awserr"
)
type modelDefinition map[string]json.RawMessage
// A DecodeModelOptions are the options for how the endpoints model definition
// are decoded.
type DecodeModelOptions struct {
SkipCustomizations bool
}
// Set combines all of the option functions together.
func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
for _, fn := range optFns {
fn(d)
}
}
// DecodeModel unmarshals a Regions and Endpoint model definition file into
// a endpoint Resolver. If the file format is not supported, or an error occurs
// when unmarshaling the model an error will be returned.
//
// Casting the return value of this func to an EnumPartitions will
// allow you to get a list of the partitions in the order the endpoints
// will be resolved in.
//
// resolver, err := endpoints.DecodeModel(reader)
//
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
// for _, p := range partitions {
// // ... inspect partitions
// }
func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
var opts DecodeModelOptions
opts.Set(optFns...)
// Get the version of the partition file to determine what
// unmarshaling model to use.
modelDef := modelDefinition{}
if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
return nil, newDecodeModelError("failed to decode endpoints model", err)
}
var version string
if b, ok := modelDef["version"]; ok {
version = string(b)
} else {
return nil, newDecodeModelError("endpoints version not found in model", nil)
}
if version == "3" {
return decodeV3Endpoints(modelDef, opts)
}
return nil, newDecodeModelError(
fmt.Sprintf("endpoints version %s, not supported", version), nil)
}
func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
b, ok := modelDef["partitions"]
if !ok {
return nil, newDecodeModelError("endpoints model missing partitions", nil)
}
ps := partitions{}
if err := json.Unmarshal(b, &ps); err != nil {
return nil, newDecodeModelError("failed to decode endpoints model", err)
}
if opts.SkipCustomizations {
return ps, nil
}
// Customization
for i := 0; i < len(ps); i++ {
p := &ps[i]
custAddEC2Metadata(p)
custAddS3DualStack(p)
custRmIotDataService(p)
custFixAppAutoscalingChina(p)
custFixAppAutoscalingUsGov(p)
}
return ps, nil
}
func custAddS3DualStack(p *partition) {
if p.ID != "aws" {
return
}
custAddDualstack(p, "s3")
custAddDualstack(p, "s3-control")
}
func custAddDualstack(p *partition, svcName string) {
s, ok := p.Services[svcName]
if !ok {
return
}
s.Defaults.HasDualStack = boxedTrue
s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
p.Services[svcName] = s
}
func custAddEC2Metadata(p *partition) {
p.Services["ec2metadata"] = service{
IsRegionalized: boxedFalse,
PartitionEndpoint: "aws-global",
Endpoints: endpoints{
"aws-global": endpoint{
Hostname: "169.254.169.254/latest",
Protocols: []string{"http"},
},
},
}
}
func custRmIotDataService(p *partition) {
delete(p.Services, "data.iot")
}
func custFixAppAutoscalingChina(p *partition) {
if p.ID != "aws-cn" {
return
}
const serviceName = "application-autoscaling"
s, ok := p.Services[serviceName]
if !ok {
return
}
const expectHostname = `autoscaling.{region}.amazonaws.com`
if e, a := s.Defaults.Hostname, expectHostname; e != a {
fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
return
}
s.Defaults.Hostname = expectHostname + ".cn"
p.Services[serviceName] = s
}
func custFixAppAutoscalingUsGov(p *partition) {
if p.ID != "aws-us-gov" {
return
}
const serviceName = "application-autoscaling"
s, ok := p.Services[serviceName]
if !ok {
return
}
if a := s.Defaults.CredentialScope.Service; a != "" {
fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
return
}
if a := s.Defaults.Hostname; a != "" {
fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
return
}
s.Defaults.CredentialScope.Service = "application-autoscaling"
s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com"
p.Services[serviceName] = s
}
type decodeModelError struct {
awsError
}
func newDecodeModelError(msg string, err error) decodeModelError {
return decodeModelError{
awsError: awserr.New("DecodeEndpointsModelError", msg, err),
}
}
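
A minimal sketch of DecodeModel with the SkipCustomizations option, assuming "endpoints.json" is a placeholder path to a version-3 endpoints model file:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// "endpoints.json" is a placeholder path to a version-3 endpoints model.
	f, err := os.Open("endpoints.json")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	resolver, err := endpoints.DecodeModel(f, func(o *endpoints.DecodeModelOptions) {
		o.SkipCustomizations = true // skip the partition fix-ups applied above
	})
	if err != nil {
		panic(err)
	}

	for _, p := range resolver.(endpoints.EnumPartitions).Partitions() {
		fmt.Println(p.ID())
	}
}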

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff
